// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC system registers handling functions for AArch64 mode
 */

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include "vgic/vgic.h"
#include "sys_regs.h"

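/*
 * Emulate accesses to ICC_CTLR_EL1: writes are validated against the
 * capabilities advertised by the host GIC (priority bits, ID bits, SEIS,
 * A3V) before the CBPR/EOImode bits are propagated to the shadow VMCR;
 * reads reassemble the register value from the same sources.
 */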
static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	u32 host_pri_bits, host_id_bits, host_seis, host_a3v, seis, a3v;
	struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_vmcr vmcr;
	u64 val;

	vgic_get_vmcr(vcpu, &vmcr);
	if (p->is_write) {
		val = p->regval;

		/*
		 * Disallow restoring VM state if not supported by this
		 * hardware.
		 */
		host_pri_bits = ((val & ICC_CTLR_EL1_PRI_BITS_MASK) >>
				 ICC_CTLR_EL1_PRI_BITS_SHIFT) + 1;
		if (host_pri_bits > vgic_v3_cpu->num_pri_bits)
			return false;

		vgic_v3_cpu->num_pri_bits = host_pri_bits;

		host_id_bits = (val & ICC_CTLR_EL1_ID_BITS_MASK) >>
				ICC_CTLR_EL1_ID_BITS_SHIFT;
		if (host_id_bits > vgic_v3_cpu->num_id_bits)
			return false;

		vgic_v3_cpu->num_id_bits = host_id_bits;

		host_seis = ((kvm_vgic_global_state.ich_vtr_el2 &
			      ICH_VTR_SEIS_MASK) >> ICH_VTR_SEIS_SHIFT);
		seis = (val & ICC_CTLR_EL1_SEIS_MASK) >>
			ICC_CTLR_EL1_SEIS_SHIFT;
		if (host_seis != seis)
			return false;

		host_a3v = ((kvm_vgic_global_state.ich_vtr_el2 &
			     ICH_VTR_A3V_MASK) >> ICH_VTR_A3V_SHIFT);
		a3v = (val & ICC_CTLR_EL1_A3V_MASK) >> ICC_CTLR_EL1_A3V_SHIFT;
		if (host_a3v != a3v)
			return false;

		/*
		 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
		 * The vgic_set_vmcr() will convert to ICH_VMCR layout.
		 */
		vmcr.cbpr = (val & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT;
		vmcr.eoim = (val & ICC_CTLR_EL1_EOImode_MASK) >> ICC_CTLR_EL1_EOImode_SHIFT;
		vgic_set_vmcr(vcpu, &vmcr);
	} else {
		val = 0;
		val |= (vgic_v3_cpu->num_pri_bits - 1) <<
			ICC_CTLR_EL1_PRI_BITS_SHIFT;
		val |= vgic_v3_cpu->num_id_bits << ICC_CTLR_EL1_ID_BITS_SHIFT;
		val |= ((kvm_vgic_global_state.ich_vtr_el2 &
			 ICH_VTR_SEIS_MASK) >> ICH_VTR_SEIS_SHIFT) <<
			ICC_CTLR_EL1_SEIS_SHIFT;
		val |= ((kvm_vgic_global_state.ich_vtr_el2 &
			 ICH_VTR_A3V_MASK) >> ICH_VTR_A3V_SHIFT) <<
			ICC_CTLR_EL1_A3V_SHIFT;
		/*
		 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
		 * Extract it directly using ICC_CTLR_EL1 reg definitions.
		 */
		val |= (vmcr.cbpr << ICC_CTLR_EL1_CBPR_SHIFT) & ICC_CTLR_EL1_CBPR_MASK;
		val |= (vmcr.eoim << ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;

		p->regval = val;
	}

	return true;
}

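/* Emulate ICC_PMR_EL1 using the priority mask field of the shadow VMCR. */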
static bool access_gic_pmr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	if (p->is_write) {
		vmcr.pmr = (p->regval & ICC_PMR_EL1_MASK) >> ICC_PMR_EL1_SHIFT;
		vgic_set_vmcr(vcpu, &vmcr);
	} else {
		p->regval = (vmcr.pmr << ICC_PMR_EL1_SHIFT) & ICC_PMR_EL1_MASK;
	}

	return true;
}

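/* Emulate ICC_BPR0_EL1 using the BPR field of the shadow VMCR. */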
static bool access_gic_bpr0(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	if (p->is_write) {
		vmcr.bpr = (p->regval & ICC_BPR0_EL1_MASK) >>
			    ICC_BPR0_EL1_SHIFT;
		vgic_set_vmcr(vcpu, &vmcr);
	} else {
		p->regval = (vmcr.bpr << ICC_BPR0_EL1_SHIFT) &
			     ICC_BPR0_EL1_MASK;
	}

	return true;
}

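/*
 * Emulate ICC_BPR1_EL1: while VMCR.CBPR is clear the register is backed by
 * the ABPR field; once CBPR is set, reads return min(BPR0 + 1, 7) and
 * writes are silently ignored, mirroring the architected BPR0 aliasing.
 */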
static bool access_gic_bpr1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	struct vgic_vmcr vmcr;

	if (!p->is_write)
		p->regval = 0;

	vgic_get_vmcr(vcpu, &vmcr);
	if (!vmcr.cbpr) {
		if (p->is_write) {
			vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >>
				     ICC_BPR1_EL1_SHIFT;
			vgic_set_vmcr(vcpu, &vmcr);
		} else {
			p->regval = (vmcr.abpr << ICC_BPR1_EL1_SHIFT) &
				     ICC_BPR1_EL1_MASK;
		}
	} else {
		if (!p->is_write)
			p->regval = min((vmcr.bpr + 1), 7U);
	}

	return true;
}

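/* Emulate ICC_IGRPEN0_EL1 using the Group 0 enable bit of the shadow VMCR. */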
static bool access_gic_grpen0(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	if (p->is_write) {
		vmcr.grpen0 = (p->regval & ICC_IGRPEN0_EL1_MASK) >>
			       ICC_IGRPEN0_EL1_SHIFT;
		vgic_set_vmcr(vcpu, &vmcr);
	} else {
		p->regval = (vmcr.grpen0 << ICC_IGRPEN0_EL1_SHIFT) &
			     ICC_IGRPEN0_EL1_MASK;
	}

	return true;
}

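/* Emulate ICC_IGRPEN1_EL1 using the Group 1 enable bit of the shadow VMCR. */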
static bool access_gic_grpen1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	if (p->is_write) {
		vmcr.grpen1 = (p->regval & ICC_IGRPEN1_EL1_MASK) >>
			       ICC_IGRPEN1_EL1_SHIFT;
		vgic_set_vmcr(vcpu, &vmcr);
	} else {
		p->regval = (vmcr.grpen1 << ICC_IGRPEN1_EL1_SHIFT) &
			     ICC_IGRPEN1_EL1_MASK;
	}

	return true;
}

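/*
 * Read or write one active priority register from the selected bank:
 * apr == 0 selects the AP0Rn bank, any other value selects the AP1Rn bank.
 */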
static void vgic_v3_access_apr_reg(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p, u8 apr, u8 idx)
{
	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
	uint32_t *ap_reg;

	if (apr)
		ap_reg = &vgicv3->vgic_ap1r[idx];
	else
		ap_reg = &vgicv3->vgic_ap0r[idx];

	if (p->is_write)
		*ap_reg = p->regval;
	else
		p->regval = *ap_reg;
}

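/*
 * Common handler for the ICC_AP0Rn_EL1/ICC_AP1Rn_EL1 registers: the
 * register index is taken from Op2, and accesses beyond what the CPU
 * implements fail, with reads returning 0.
 */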
static bool access_gic_aprn(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *r, u8 apr)
{
	u8 idx = r->Op2 & 3;

	if (idx > vgic_v3_max_apr_idx(vcpu))
		goto err;

	vgic_v3_access_apr_reg(vcpu, p, apr, idx);
	return true;
err:
	if (!p->is_write)
		p->regval = 0;

	return false;
}

static bool access_gic_ap0r(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	return access_gic_aprn(vcpu, p, r, 0);
}

static bool access_gic_ap1r(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	return access_gic_aprn(vcpu, p, r, 1);
}

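/*
 * ICC_SRE_EL1: writes that would clear the SRE bit are rejected; reads
 * return the cached vgic_sre value.
 */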
static bool access_gic_sre(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

	/* Validate SRE bit */
	if (p->is_write) {
		if (!(p->regval & ICC_SRE_EL1_SRE))
			return false;
	} else {
		p->regval = vgicv3->vgic_sre;
	}

	return true;
}
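
/* Table of the emulated ICC_*_EL1 registers and their access handlers. */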
static const struct sys_reg_desc gic_v3_icc_reg_descs[] = {
	{ SYS_DESC(SYS_ICC_PMR_EL1), access_gic_pmr },
	{ SYS_DESC(SYS_ICC_BPR0_EL1), access_gic_bpr0 },
	{ SYS_DESC(SYS_ICC_AP0R0_EL1), access_gic_ap0r },
	{ SYS_DESC(SYS_ICC_AP0R1_EL1), access_gic_ap0r },
	{ SYS_DESC(SYS_ICC_AP0R2_EL1), access_gic_ap0r },
	{ SYS_DESC(SYS_ICC_AP0R3_EL1), access_gic_ap0r },
	{ SYS_DESC(SYS_ICC_AP1R0_EL1), access_gic_ap1r },
	{ SYS_DESC(SYS_ICC_AP1R1_EL1), access_gic_ap1r },
	{ SYS_DESC(SYS_ICC_AP1R2_EL1), access_gic_ap1r },
	{ SYS_DESC(SYS_ICC_AP1R3_EL1), access_gic_ap1r },
	{ SYS_DESC(SYS_ICC_BPR1_EL1), access_gic_bpr1 },
	{ SYS_DESC(SYS_ICC_CTLR_EL1), access_gic_ctlr },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
	{ SYS_DESC(SYS_ICC_IGRPEN0_EL1), access_gic_grpen0 },
	{ SYS_DESC(SYS_ICC_IGRPEN1_EL1), access_gic_grpen1 },
};

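/*
 * Check whether the requested CPU interface register is handled by this
 * file: returns 0 if a matching descriptor exists, -ENXIO otherwise.
 */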
int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id,
				 u64 *reg)
{
	struct sys_reg_params params;
	u64 sysreg = (id & KVM_DEV_ARM_VGIC_SYSREG_MASK) | KVM_REG_SIZE_U64;

	params.regval = *reg;
	params.is_write = is_write;

	if (find_reg_by_id(sysreg, &params, gic_v3_icc_reg_descs,
			   ARRAY_SIZE(gic_v3_icc_reg_descs)))
		return 0;

	return -ENXIO;
}

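/*
 * Perform a userspace read or write of a GICv3 CPU interface register by
 * looking up its descriptor and invoking the access handler; on reads the
 * result is copied back to the caller.
 */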
int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu, bool is_write, u64 id,
				u64 *reg)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	u64 sysreg = (id & KVM_DEV_ARM_VGIC_SYSREG_MASK) | KVM_REG_SIZE_U64;

	if (is_write)
		params.regval = *reg;
	params.is_write = is_write;

	r = find_reg_by_id(sysreg, &params, gic_v3_icc_reg_descs,
			   ARRAY_SIZE(gic_v3_icc_reg_descs));
	if (!r)
		return -ENXIO;

	if (!r->access(vcpu, &params, r))
		return -EINVAL;

	if (!is_write)
		*reg = params.regval;

	return 0;
}