// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGICv2 MMIO handling functions
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/nospec.h>

#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

/*
 * The Revision field in the IIDR has the following meanings:
 *
 * Revision 1: Report GICv2 interrupts as group 0 instead of group 1
 * Revision 2: Interrupt groups are guest-configurable and signaled using
 *	       their configured groups.
 */

static unsigned long vgic_mmio_read_v2_misc(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
	u32 value;

	switch (addr & 0x0c) {
	case GIC_DIST_CTRL:
		value = vgic->enabled ? GICD_ENABLE : 0;
		break;
	case GIC_DIST_CTR:
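		/*
		 * GICD_TYPER: ITLinesNumber (bits [4:0]) encodes the number
		 * of supported INTIDs as 32 * (N + 1), and CPUNumber
		 * (bits [7:5]) holds the number of vCPUs minus one. For
		 * example, 4 vCPUs and 160 IRQs (32 private + 128 SPIs)
		 * read back as 0x64.
		 */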
		value = vgic->nr_spis + VGIC_NR_PRIVATE_IRQS;
		value = (value >> 5) - 1;
		value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		break;
	case GIC_DIST_IIDR:
		value = (PRODUCT_ID_KVM << GICD_IIDR_PRODUCT_ID_SHIFT) |
			(vgic->implementation_rev << GICD_IIDR_REVISION_SHIFT) |
			(IMPLEMENTER_ARM << GICD_IIDR_IMPLEMENTER_SHIFT);
		break;
	default:
		return 0;
	}

	return value;
}

static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	bool was_enabled = dist->enabled;

	switch (addr & 0x0c) {
	case GIC_DIST_CTRL:
		dist->enabled = val & GICD_ENABLE;
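		/* Enabling the distributor may make pending IRQs deliverable. */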
		if (!was_enabled && dist->enabled)
			vgic_kick_vcpus(vcpu->kvm);
		break;
	case GIC_DIST_CTR:
	case GIC_DIST_IIDR:
		/* Nothing to do */
		return;
	}
}

static int vgic_mmio_uaccess_write_v2_misc(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len,
					   unsigned long val)
{
	switch (addr & 0x0c) {
	case GIC_DIST_IIDR:
		if (val != vgic_mmio_read_v2_misc(vcpu, addr, len))
			return -EINVAL;

		/*
		 * If we observe a write to GICD_IIDR we know that userspace
		 * has been updated and has had a chance to cope with older
		 * kernels (VGICv2 IIDR.Revision == 0) incorrectly reporting
		 * interrupts as group 1, and therefore we now allow groups to
		 * be user writable.  Doing this by default would break
		 * migration from old kernels to new kernels with legacy
		 * userspace.
		 */
		vcpu->kvm->arch.vgic.v2_groups_user_writable = true;
		return 0;
	}

	vgic_mmio_write_v2_misc(vcpu, addr, len, val);
	return 0;
}

static int vgic_mmio_uaccess_write_v2_group(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len,
					    unsigned long val)
{
	if (vcpu->kvm->arch.vgic.v2_groups_user_writable)
		vgic_mmio_write_group(vcpu, addr, len, val);

	return 0;
}

static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
				 gpa_t addr, unsigned int len,
				 unsigned long val)
{
	int nr_vcpus = atomic_read(&source_vcpu->kvm->online_vcpus);
	int intid = val & 0xf;
	int targets = (val >> 16) & 0xff;
	int mode = (val >> 24) & 0x03;
	int c;
	struct kvm_vcpu *vcpu;
	unsigned long flags;

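	/*
	 * GICD_SGIR: SGIINTID in bits [3:0], CPUTargetList in bits [23:16],
	 * TargetListFilter in bits [25:24].
	 */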
	switch (mode) {
	case 0x0:		/* as specified by targets */
		break;
	case 0x1:
		targets = (1U << nr_vcpus) - 1;			/* all, ... */
		targets &= ~(1U << source_vcpu->vcpu_id);	/* but self */
		break;
	case 0x2:		/* this very vCPU only */
		targets = (1U << source_vcpu->vcpu_id);
		break;
	case 0x3:		/* reserved */
		return;
	}

	kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
		struct vgic_irq *irq;

		if (!(targets & (1U << c)))
			continue;

		irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = true;
		irq->source |= 1U << source_vcpu->vcpu_id;

		vgic_queue_irq_unlock(source_vcpu->kvm, irq, flags);
		vgic_put_irq(source_vcpu->kvm, irq);
	}
}

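/*
 * GICD_ITARGETSR holds one target byte per INTID, so these handlers use
 * 8 bits per interrupt and accept both 8-bit and 32-bit accesses.
 */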
static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->targets << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0);
	int i;
	unsigned long flags;

	/* GICD_ITARGETSR[0-7] are read-only */
	if (intid < VGIC_NR_PRIVATE_IRQS)
		return;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
		int target;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		irq->targets = (val >> (i * 8)) & cpu_mask;
		target = irq->targets ? __ffs(irq->targets) : 0;
		irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

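/*
 * GICD_{C,S}PENDSGIR: one byte per SGI, where bit n tracks a pending SGI
 * from source CPU n.
 */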
static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	u32 intid = addr & 0x0f;
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->source << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}
	return val;
}

static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	u32 intid = addr & 0x0f;
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		irq->source &= ~((val >> (i * 8)) & 0xff);
		if (!irq->source)
			irq->pending_latch = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	u32 intid = addr & 0x0f;
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		irq->source |= (val >> (i * 8)) & 0xff;

		if (irq->source) {
			irq->pending_latch = true;
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		} else {
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		}
		vgic_put_irq(vcpu->kvm, irq);
	}
}

#define GICC_ARCH_VERSION_V2	0x2

/* These are for userland accesses only, there is no guest-facing emulation. */
static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len)
{
	struct vgic_vmcr vmcr;
	u32 val;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (addr & 0xff) {
	case GIC_CPU_CTRL:
		val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT;
		val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT;
		val |= vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT;
		val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT;
		val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT;
		val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT;

		break;
	case GIC_CPU_PRIMASK:
		/*
		 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
		 * PMR field as GICH_VMCR.VMPriMask rather than
		 * GICC_PMR.Priority, so we expose the upper five bits of
		 * the priority mask to userspace using the lower bits of the
		 * unsigned long.
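		 * For example, a GICC_PMR.Priority value of 0xf8 reads back
		 * as 0x1f here.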
		 */
		val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >>
			GICV_PMR_PRIORITY_SHIFT;
		break;
	case GIC_CPU_BINPOINT:
		val = vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		val = vmcr.abpr;
		break;
	case GIC_CPU_IDENT:
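		/* GICC_IIDR: ProductID [31:20], ArchVersion [19:16], Implementer [11:0] */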
		val = ((PRODUCT_ID_KVM << 20) |
		       (GICC_ARCH_VERSION_V2 << 16) |
		       IMPLEMENTER_ARM);
		break;
	default:
		return 0;
	}

	return val;
}

static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (addr & 0xff) {
	case GIC_CPU_CTRL:
		vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0);
		vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1);
		vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl);
		vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn);
		vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR);
		vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS);

		break;
	case GIC_CPU_PRIMASK:
		/*
		 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
		 * PMR field as GICH_VMCR.VMPriMask rather than
		 * GICC_PMR.Priority, so we expose the upper five bits of
		 * the priority mask to userspace using the lower bits of the
		 * unsigned long.
		 */
		vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) &
			GICV_PMR_PRIORITY_MASK;
		break;
	case GIC_CPU_BINPOINT:
		vmcr.bpr = val;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr.abpr = val;
		break;
	}

	vgic_set_vmcr(vcpu, &vmcr);
}

static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
					gpa_t addr, unsigned int len)
{
	int n; /* which APRn is this */

	n = (addr >> 2) & 0x3;

	if (kvm_vgic_global_state.type == VGIC_V2) {
		/* GICv2 hardware systems support max. 32 groups */
		if (n != 0)
			return 0;
		return vcpu->arch.vgic_cpu.vgic_v2.vgic_apr;
	} else {
		struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

		if (n > vgic_v3_max_apr_idx(vcpu))
			return 0;

		n = array_index_nospec(n, 4);

		/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
		return vgicv3->vgic_ap1r[n];
	}
}

static void vgic_mmio_write_apr(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val)
{
	int n; /* which APRn is this */

	n = (addr >> 2) & 0x3;

	if (kvm_vgic_global_state.type == VGIC_V2) {
		/* GICv2 hardware systems support max. 32 groups */
		if (n != 0)
			return;
		vcpu->arch.vgic_cpu.vgic_v2.vgic_apr = val;
	} else {
		struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

		if (n > vgic_v3_max_apr_idx(vcpu))
			return;

		n = array_index_nospec(n, 4);

		/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
		vgicv3->vgic_ap1r[n] = val;
	}
}

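/*
 * Distributor register map: each entry pairs an MMIO offset with guest
 * read/write handlers, optional userspace (uaccess) handlers, the region
 * length or bits-per-IRQ, and the permitted access widths.
 */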
static const struct vgic_register_region vgic_v2_dist_registers[] = {
	REGISTER_DESC_WITH_LENGTH_UACCESS(GIC_DIST_CTRL,
		vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc,
		NULL, vgic_mmio_uaccess_write_v2_misc,
		12, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
		vgic_mmio_read_group, vgic_mmio_write_group,
		NULL, vgic_mmio_uaccess_write_v2_group, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
		vgic_mmio_read_enable, vgic_mmio_write_senable,
		NULL, vgic_uaccess_write_senable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
		vgic_mmio_read_enable, vgic_mmio_write_cenable,
		NULL, vgic_uaccess_write_cenable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
		vgic_mmio_read_pending, vgic_mmio_write_spending,
		vgic_uaccess_read_pending, vgic_uaccess_write_spending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
		vgic_mmio_read_pending, vgic_mmio_write_cpending,
		vgic_uaccess_read_pending, vgic_uaccess_write_cpending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
		vgic_mmio_read_active, vgic_mmio_write_sactive,
		vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
		vgic_mmio_read_active, vgic_mmio_write_cactive,
		vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
		vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
		8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET,
		vgic_mmio_read_target, vgic_mmio_write_target, NULL, NULL, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG,
		vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT,
		vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_CLEAR,
		vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_SET,
		vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
};

static const struct vgic_register_region vgic_v2_cpu_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_CTRL,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_PRIMASK,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_BINPOINT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ALIAS_BINPOINT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
		vgic_mmio_read_apr, vgic_mmio_write_apr, 16,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
};

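/*
 * The GICv2 distributor occupies a single 4K region; the size returned
 * here is used when registering the distributor MMIO device.
 */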
unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
{
	dev->regions = vgic_v2_dist_registers;
	dev->nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);

	kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

	return SZ_4K;
}

int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	struct vgic_io_device iodev;
	struct vgic_reg_attr reg_attr;
	struct kvm_vcpu *vcpu;
	gpa_t addr;
	int ret;

	ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		iodev.regions = vgic_v2_dist_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
		iodev.base_addr = 0;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		iodev.regions = vgic_v2_cpu_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers);
		iodev.base_addr = 0;
		break;
	default:
		return -ENXIO;
	}

	/* We only support aligned 32-bit accesses. */
	if (addr & 3)
		return -ENXIO;

	region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
	if (!region)
		return -ENXIO;

	return 0;
}

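/*
 * Userspace register accessors: wrap the register tables in a transient
 * vgic_io_device so vgic_uaccess() can dispatch to the matching handler.
 */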
int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			  int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v2_cpu_registers,
		.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers),
		.iodev_type = IODEV_CPUIF,
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}

int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v2_dist_registers,
		.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers),
		.iodev_type = IODEV_DIST,
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}