// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC MMIO handling functions
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

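/*
 * Trivial register handlers: RAZ (read-as-zero), RAO (read-as-one) and
 * WI (write-ignored), used for reserved or unimplemented register ranges.
 */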
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val)
{
	/* Ignore */
}

int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			       unsigned int len, unsigned long val)
{
	/* Ignore */
	return 0;
}

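/*
 * GICD_IGROUPR read: report the group bit of each interrupt covered by
 * this access, one bit per interrupt.
 */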
unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->group)
			value |= BIT(i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

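/*
 * Propagate the current priority and group of a HW-backed (GICv4.1) SGI
 * to the host via its_prop_update_vsgi(), warning if the update fails.
 */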
static void vgic_update_vsgi(struct vgic_irq *irq)
{
	WARN_ON(its_prop_update_vsgi(irq->host_irq, irq->priority, irq->group));
}

void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
			   unsigned int len, unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->group = !!(val & BIT(i));
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			vgic_update_vsgi(irq);
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		} else {
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		}

		vgic_put_irq(vcpu->kvm, irq);
	}
}

/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->enabled)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

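/*
 * GICD_ISENABLER write: each set bit in the written value enables the
 * corresponding interrupt. For HW-mapped interrupts the enable is also
 * propagated to the physical side where needed.
 */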
void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			if (!irq->enabled) {
				struct irq_data *data;

				irq->enabled = true;
				data = &irq_to_desc(irq->host_irq)->irq_data;
				while (irqd_irq_disabled(data))
					enable_irq(irq->host_irq);
			}

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		} else if (vgic_irq_is_mapped_level(irq)) {
			bool was_high = irq->line_level;

			/*
			 * We need to update the state of the interrupt because
			 * the guest might have changed the state of the device
			 * while the interrupt was disabled at the VGIC level.
			 */
			irq->line_level = vgic_get_phys_line_level(irq);
			/*
			 * Deactivate the physical interrupt so the GIC will let
			 * us know when it is asserted again.
			 */
			if (!irq->active && was_high && !irq->line_level)
				vgic_irq_set_phys_active(irq, false);
		}
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

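/*
 * GICD_ICENABLER write: each set bit in the written value disables the
 * corresponding interrupt, masking HW-backed SGIs on the host as well.
 */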
void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid) && irq->enabled)
			disable_irq_nosync(irq->host_irq);

		irq->enabled = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

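/*
 * Userspace variants of the enable/disable handlers: these only update the
 * virtual state and never touch the physical interrupt.
 */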
int vgic_uaccess_write_senable(struct kvm_vcpu *vcpu,
			       gpa_t addr, unsigned int len,
			       unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
			       gpa_t addr, unsigned int len,
			       unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = false;
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

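/*
 * Compute the pending state of each interrupt covered by this access.
 * For HW-backed SGIs the pending bit is read back from the host irqchip,
 * and for mapped level interrupts (guest accesses only) the physical line
 * level is sampled; everything else uses the software pending state.
 */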
static unsigned long __read_pending(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    bool is_user)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		unsigned long flags;
		bool val;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			int err;

			val = false;
			err = irq_get_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    &val);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
		} else if (!is_user && vgic_irq_is_mapped_level(irq)) {
			val = vgic_get_phys_line_level(irq);
		} else {
			val = irq_is_pending(irq);
		}

		value |= ((u32)val << i);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len)
{
	return __read_pending(vcpu, addr, len, false);
}

unsigned long vgic_uaccess_read_pending(struct kvm_vcpu *vcpu,
					gpa_t addr, unsigned int len)
{
	return __read_pending(vcpu, addr, len, true);
}

static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
	return (vgic_irq_is_sgi(irq->intid) &&
		vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
}

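/*
 * GICD_ISPENDR write: each set bit makes the corresponding interrupt
 * pending. HW-backed SGIs are made pending directly in the host irqchip;
 * for other HW-mapped interrupts the physical active state is also set.
 */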
void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/* GICD_ISPENDR0 SGI bits are WI */
		if (is_vgic_v2_sgi(vcpu, irq)) {
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			/* HW SGI? Ask the GIC to inject it */
			int err;
			err = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    true);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		}

		irq->pending_latch = true;
		if (irq->hw)
			vgic_irq_set_phys_active(irq, true);

		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

int vgic_uaccess_write_spending(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = true;

		/*
		 * GICv2 SGIs are terribly broken. We can't restore
		 * the source of the interrupt, so just pick the vcpu
		 * itself as the source...
		 */
		if (is_vgic_v2_sgi(vcpu, irq))
			irq->source |= BIT(vcpu->vcpu_id);

		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
	irq->pending_latch = false;

	/*
	 * We don't want the guest to effectively mask the physical
	 * interrupt by doing a write to SPENDR followed by a write to
	 * CPENDR for HW interrupts, so we clear the active state on
	 * the physical side if the virtual interrupt is not active.
	 * This may lead to taking an additional interrupt on the
	 * host, but that should not be a problem as the worst that
	 * can happen is an additional vgic injection. We also clear
	 * the pending state to maintain proper semantics for edge HW
	 * interrupts.
	 */
	vgic_irq_set_phys_pending(irq, false);
	if (!irq->active)
		vgic_irq_set_phys_active(irq, false);
}

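/*
 * GICD_ICPENDR write: each set bit clears the pending state of the
 * corresponding interrupt, forwarding the clear to the host irqchip for
 * HW-backed SGIs and to the physical side for other HW interrupts.
 */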
void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/* GICD_ICPENDR0 SGI bits are WI */
		if (is_vgic_v2_sgi(vcpu, irq)) {
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			/* HW SGI? Ask the GIC to clear its pending bit */
			int err;
			err = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    false);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		}

		if (irq->hw)
			vgic_hw_irq_cpending(vcpu, irq);
		else
			irq->pending_latch = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

int vgic_uaccess_write_cpending(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		/*
		 * More fun with GICv2 SGIs! If we're clearing one of them
		 * from userspace, which source vcpu to clear? Let's not
		 * even think of it, and blow the whole set.
		 */
		if (is_vgic_v2_sgi(vcpu, irq))
			irq->source = 0;

		irq->pending_latch = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts as well as GICv3 private interrupts, we have to
 * stop all the VCPUs because interrupts can be migrated while we don't hold
 * the IRQ locks and we don't want to be chasing moving targets.
 *
 * For GICv2 private interrupts we don't have to do anything because
 * userspace accesses to the VGIC state already require all VCPUs to be
 * stopped, and only the VCPU itself can modify its private interrupts
 * active state, which guarantees that the VCPU is not running.
 */
static void vgic_access_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_access_active_prepare */
static void vgic_access_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_resume_guest(vcpu->kvm);
}

static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * Even for HW interrupts, don't evaluate the HW state as
		 * all the guest is interested in is the virtual state.
		 */
		if (irq->active)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 val;

	mutex_lock(&vcpu->kvm->lock);
	vgic_access_active_prepare(vcpu, intid);

	val = __vgic_mmio_read_active(vcpu, addr, len);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);

	return val;
}

unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
				       gpa_t addr, unsigned int len)
{
	return __vgic_mmio_read_active(vcpu, addr, len);
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				      bool active, bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->active = active;
	vgic_irq_set_phys_active(irq, active);
}

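/*
 * Update the active state of a single interrupt. Takes and releases the
 * per-IRQ lock; if the interrupt ends up active it is (re)queued to the
 * owning VCPU.
 */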
static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				    bool active)
{
	unsigned long flags;
	struct kvm_vcpu *requester_vcpu = kvm_get_running_vcpu();

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (irq->hw && !vgic_irq_is_sgi(irq->intid)) {
		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
	} else if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
		/*
		 * GICv4.1 VSGI feature doesn't track an active state,
		 * so let's not kid ourselves, there is nothing we can
		 * do here.
		 */
		irq->active = false;
	} else {
		u32 model = vcpu->kvm->arch.vgic.vgic_model;
		u8 active_source;

		irq->active = active;

		/*
		 * The GICv2 architecture indicates that the source CPUID for
		 * an SGI should be provided during an EOI which implies that
		 * the active state is stored somewhere, but at the same time
		 * this state is not architecturally exposed anywhere and we
		 * have no way of knowing the right source.
		 *
		 * This may lead to a VCPU not being able to receive
		 * additional instances of a particular SGI after migration
		 * for a GICv2 VM on some GIC implementations. Oh well.
		 */
		active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
		    active && vgic_irq_is_sgi(irq->intid))
			irq->active_source = active_source;
	}

	if (irq->active)
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
}

static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, false);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_access_active_prepare(vcpu, intid);

	__vgic_mmio_write_cactive(vcpu, addr, len, val);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	__vgic_mmio_write_cactive(vcpu, addr, len, val);
	return 0;
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, true);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_access_active_prepare(vcpu, intid);

	__vgic_mmio_write_sactive(vcpu, addr, len, val);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	__vgic_mmio_write_sactive(vcpu, addr, len, val);
	return 0;
}

unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->priority << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		if (irq->hw && vgic_irq_is_sgi(irq->intid))
			vgic_update_vsgi(irq);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

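/*
 * GICD_ICFGR read: two configuration bits per interrupt, with bit[1] of
 * each field set for edge-triggered interrupts and clear for level.
 */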
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->config == VGIC_CONFIG_EDGE)
			value |= (2U << (i * 2));

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq;

		/*
		 * The configuration cannot be changed for SGIs in general,
		 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
		 * code relies on PPIs being level triggered, so we also
		 * make them read-only here.
		 */
		if (intid + i < VGIC_NR_PRIVATE_IRQS)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (test_bit(i * 2 + 1, &val))
			irq->config = VGIC_CONFIG_EDGE;
		else
			irq->config = VGIC_CONFIG_LEVEL;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

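/*
 * Return a bitmap with one bit per interrupt in [intid, intid + 31], set
 * when the interrupt is level-triggered and its line level is currently
 * high. The write counterpart below restores those levels.
 */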
u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
	int i;
	u64 val = 0;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
			val |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u64 val)
{
	int i;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	unsigned long flags;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;
		bool new_level;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * Line level is set irrespective of irq type
		 * (level or edge) to avoid dependency that VM should
		 * restore irq config before line level.
		 */
		new_level = !!(val & (1U << i));
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->line_level = new_level;
		if (new_level)
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		else
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

static int match_region(const void *key, const void *elt)
{
	const unsigned int offset = (unsigned long)key;
	const struct vgic_register_region *region = elt;

	if (offset < region->reg_offset)
		return -1;

	if (offset >= region->reg_offset + region->len)
		return 1;

	return 0;
}

const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
		      int nr_regions, unsigned int offset)
{
	return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
		       sizeof(regions[0]), match_region);
}

void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_vmcr(vcpu, vmcr);
	else
		vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_get_vmcr(vcpu, vmcr);
	else
		vgic_v3_get_vmcr(vcpu, vmcr);
}

/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPUs native format to deal with it as a data
 * value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
	unsigned long data = kvm_mmio_read_buf(val, len);

	switch (len) {
	case 1:
		return data;
	case 2:
		return le16_to_cpu(data);
	case 4:
		return le32_to_cpu(data);
	default:
		return le64_to_cpu(data);
	}
}

/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly. Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPUs native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data)
{
	switch (len) {
	case 1:
		break;
	case 2:
		data = cpu_to_le16(data);
		break;
	case 4:
		data = cpu_to_le32(data);
		break;
	default:
		data = cpu_to_le64(data);
	}

	kvm_mmio_write_buf(buf, len, data);
}

static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
	return container_of(dev, struct vgic_io_device, dev);
}

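/*
 * Check that the access width and alignment match what the register
 * region supports, and that the access does not target an IRQ beyond the
 * number of interrupts configured for this VM.
 */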
static bool check_region(const struct kvm *kvm,
			 const struct vgic_register_region *region,
			 gpa_t addr, int len)
{
	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	switch (len) {
	case sizeof(u8):
		flags = VGIC_ACCESS_8bit;
		break;
	case sizeof(u32):
		flags = VGIC_ACCESS_32bit;
		break;
	case sizeof(u64):
		flags = VGIC_ACCESS_64bit;
		break;
	default:
		return false;
	}

	if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
		if (!region->bits_per_irq)
			return true;

		/* Do we access a non-allocated IRQ? */
		return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
	}

	return false;
}

const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
		     gpa_t addr, int len)
{
	const struct vgic_register_region *region;

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region || !check_region(vcpu->kvm, region, addr, len))
		return NULL;

	return region;
}

static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region) {
		*val = 0;
		return 0;
	}

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_read)
		*val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
	else
		*val = region->read(r_vcpu, addr, sizeof(u32));

	return 0;
}

static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, const u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region)
		return 0;

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_write)
		return region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);

	region->write(r_vcpu, addr, sizeof(u32), *val);
	return 0;
}

/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
		 bool is_write, int offset, u32 *val)
{
	if (is_write)
		return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
	else
		return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
}

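/*
 * Guest MMIO dispatch: look up the register region for the faulting
 * address and invoke its handler, selecting the target VCPU or ITS
 * depending on which kind of device this I/O region belongs to.
 * Accesses that hit no valid region read as zero and ignore writes.
 */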
static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = 0;

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region) {
		memset(val, 0, len);
		return 0;
	}

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_DIST:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_REDIST:
		data = region->read(iodev->redist_vcpu, addr, len);
		break;
	case IODEV_ITS:
		data = region->its_read(vcpu->kvm, iodev->its, addr, len);
		break;
	}

	vgic_data_host_to_mmio_bus(val, len, data);
	return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = vgic_data_mmio_bus_to_host(val, len);

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region)
		return 0;

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_DIST:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_REDIST:
		region->write(iodev->redist_vcpu, addr, len, data);
		break;
	case IODEV_ITS:
		region->its_write(vcpu->kvm, iodev->its, addr, len, data);
		break;
	}

	return 0;
}

struct kvm_io_device_ops kvm_io_gic_ops = {
	.read = dispatch_mmio_read,
	.write = dispatch_mmio_write,
};

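/*
 * Register the distributor MMIO region of a VGIC_V2 or VGIC_V3 device on
 * the KVM MMIO bus, so that guest accesses to it are routed to the
 * dispatch functions above.
 */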
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type type)
{
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
	int ret = 0;
	unsigned int len;

	switch (type) {
	case VGIC_V2:
		len = vgic_v2_init_dist_iodev(io_device);
		break;
	case VGIC_V3:
		len = vgic_v3_init_dist_iodev(io_device);
		break;
	default:
		BUG_ON(1);
	}

	io_device->base_addr = dist_base_address;
	io_device->iodev_type = IODEV_DIST;
	io_device->redist_vcpu = NULL;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				      len, &io_device->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}