// SPDX-License-Identifier: GPL-2.0
/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/nospec.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/dis.h>
#include <linux/uaccess.h>
#include <asm/sclp.h>
#include <asm/isc.h>
#include <asm/gmap.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
#include <asm/airq.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00

static struct kvm_s390_gib *gib;

/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
	int c, scn;

	if (!kvm_s390_test_cpuflags(vcpu, CPUSTAT_ECALL_PEND))
		return 0;

	BUG_ON(!kvm_s390_use_sca_entries());
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (src_id)
		*src_id = scn;

	return c;
}

static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
	int expect, rc;

	BUG_ON(!kvm_s390_use_sca_entries());
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (rc != expect) {
		/* another external call is pending */
		return -EBUSY;
	}
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
	return 0;
}

static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
	int rc, expect;

	if (!kvm_s390_use_sca_entries())
		return;
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
	WARN_ON(rc != expect); /* cannot clear? */
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	return psw_extint_disabled(vcpu) &&
	       psw_ioint_disabled(vcpu) &&
	       psw_mchk_disabled(vcpu);
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	if (psw_extint_disabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
		return 0;
	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
		/* No timer interrupts when single stepping */
		return 0;
	return 1;
}

static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
	const u64 ckc = vcpu->arch.sie_block->ckc;

	if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
		if ((s64)ckc >= (s64)now)
			return 0;
	} else if (ckc >= now) {
		return 0;
	}
	return ckc_interrupts_enabled(vcpu);
}

static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	return !psw_extint_disabled(vcpu) &&
	       (vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK);
}

static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
	if (!cpu_timer_interrupts_enabled(vcpu))
		return 0;
	return kvm_s390_get_cpu_timer(vcpu) >> 63;
}

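/* Return the CR6 I/O interruption submask bit for interruption subclass @isc */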
static uint64_t isc_to_isc_bits(int isc)
{
	return (0x80 >> isc) << 24;
}

static inline u32 isc_to_int_word(u8 isc)
{
	return ((u32)isc << 27) | 0x80000000;
}

static inline u8 int_word_to_isc(u32 int_word)
{
	return (int_word & 0x38000000) >> 27;
}

/*
 * To use atomic bitmap functions, we have to provide a bitmap address
 * that is u64 aligned. However, the ipm might be u32 aligned.
 * Therefore, we logically start the bitmap at the very beginning of the
 * struct and fixup the bit number.
 */
#define IPM_BIT_OFFSET (offsetof(struct kvm_s390_gisa, ipm) * BITS_PER_BYTE)

/**
 * gisa_set_iam - change the GISA interruption alert mask
 *
 * @gisa: gisa to operate on
 * @iam: new IAM value to use
 *
 * Change the IAM atomically with the next alert address and the IPM
 * of the GISA if the GISA is not part of the GIB alert list. All three
 * fields are located in the first long word of the GISA.
 *
 * Returns: 0 on success
 *          -EBUSY in case the gisa is part of the alert list
 */
static inline int gisa_set_iam(struct kvm_s390_gisa *gisa, u8 iam)
{
	u64 word, _word;

	do {
		word = READ_ONCE(gisa->u64.word[0]);
		if ((u64)gisa != word >> 32)
			return -EBUSY;
		_word = (word & ~0xffUL) | iam;
	} while (cmpxchg(&gisa->u64.word[0], word, _word) != word);

	return 0;
}

/**
 * gisa_clear_ipm - clear the GISA interruption pending mask
 *
 * @gisa: gisa to operate on
 *
 * Clear the IPM atomically with the next alert address and the IAM
 * of the GISA unconditionally. All three fields are located in the
 * first long word of the GISA.
 */
static inline void gisa_clear_ipm(struct kvm_s390_gisa *gisa)
{
	u64 word, _word;

	do {
		word = READ_ONCE(gisa->u64.word[0]);
		_word = word & ~(0xffUL << 24);
	} while (cmpxchg(&gisa->u64.word[0], word, _word) != word);
}

/**
 * gisa_get_ipm_or_restore_iam - return IPM or restore GISA IAM
 *
 * @gi: gisa interrupt struct to work on
 *
 * Atomically restores the interruption alert mask if none of the
 * relevant ISCs are pending and return the IPM.
 *
 * Returns: the relevant pending ISCs
 */
static inline u8 gisa_get_ipm_or_restore_iam(struct kvm_s390_gisa_interrupt *gi)
{
	u8 pending_mask, alert_mask;
	u64 word, _word;

	do {
		word = READ_ONCE(gi->origin->u64.word[0]);
		alert_mask = READ_ONCE(gi->alert.mask);
		pending_mask = (u8)(word >> 24) & alert_mask;
		if (pending_mask)
			return pending_mask;
		_word = (word & ~0xffUL) | alert_mask;
	} while (cmpxchg(&gi->origin->u64.word[0], word, _word) != word);

	return 0;
}

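/*
 * A GISA that is not queued on the GIB alert list has its next_alert
 * field pointing to itself.
 */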
static inline int gisa_in_alert_list(struct kvm_s390_gisa *gisa)
{
	return READ_ONCE(gisa->next_alert) != (u32)(u64)gisa;
}

static inline void gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
	set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline u8 gisa_get_ipm(struct kvm_s390_gisa *gisa)
{
	return READ_ONCE(gisa->ipm);
}

static inline void gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
	clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline int gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
	return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

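/*
 * Pending floating and local interrupts, with masked floating interrupts
 * filtered out and without taking the GISA IPM into account.
 */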
static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu)
{
	unsigned long pending = vcpu->kvm->arch.float_int.pending_irqs |
				vcpu->arch.local_int.pending_irqs;

	pending &= ~vcpu->kvm->arch.float_int.masked_irqs;
	return pending;
}

static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
	unsigned long pending_mask;

	pending_mask = pending_irqs_no_gisa(vcpu);
	if (gi->origin)
		pending_mask |= gisa_get_ipm(gi->origin) << IRQ_PEND_IO_ISC_7;
	return pending_mask;
}

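/*
 * The IRQ_PEND_IO_ISC_* bits are consecutive, so the conversion between
 * an ISC and the corresponding irq_type is symmetric.
 */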
static inline int isc_to_irq_type(unsigned long isc)
{
	return IRQ_PEND_IO_ISC_0 - isc;
}

static inline int irq_type_to_isc(unsigned long irq_type)
{
	return IRQ_PEND_IO_ISC_0 - irq_type;
}

static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
				  unsigned long active_mask)
{
	int i;

	for (i = 0; i <= MAX_ISC; i++)
		if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
			active_mask &= ~(1UL << (isc_to_irq_type(i)));

	return active_mask;
}

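/*
 * Determine which of the pending interrupts can currently be delivered,
 * based on the PSW interruption masks and the relevant control register
 * submasks.
 */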
static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
	unsigned long active_mask;

	active_mask = pending_irqs(vcpu);
	if (!active_mask)
		return 0;

	if (psw_extint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_EXT_MASK;
	if (psw_ioint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_IO_MASK;
	else
		active_mask = disable_iscs(vcpu, active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_EMERGENCY_SIGNAL_SUBMASK))
		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK))
		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) {
		__clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
		__clear_bit(IRQ_PEND_EXT_SERVICE_EV, &active_mask);
	}
	if (psw_mchk_disabled(vcpu))
		active_mask &= ~IRQ_PEND_MCHK_MASK;
	/* PV guest cpus can have a single interruption injected at a time. */
	if (kvm_s390_pv_cpu_get_handle(vcpu) &&
	    vcpu->arch.sie_block->iictl != IICTL_CODE_NONE)
		active_mask &= ~(IRQ_PEND_EXT_II_MASK |
				 IRQ_PEND_IO_MASK |
				 IRQ_PEND_MCHK_MASK);
	/*
	 * Check both floating and local interrupt's cr14 because
	 * bit IRQ_PEND_MCHK_REP could be set in both cases.
	 */
	if (!(vcpu->arch.sie_block->gcr[14] &
	      (vcpu->kvm->arch.float_int.mchk.cr14 |
	       vcpu->arch.local_int.irq.mchk.cr14)))
		__clear_bit(IRQ_PEND_MCHK_REP, &active_mask);

	/*
	 * STOP irqs will never be actively delivered. They are triggered via
	 * intercept requests and cleared when the stop intercept is performed.
	 */
	__clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

	return active_mask;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
	set_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
	clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IO_INT | CPUSTAT_EXT_INT |
				CPUSTAT_STOP_INT);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}
}

static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK))
		return;
	if (psw_ioint_disabled(vcpu))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR6;
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_EXT_MASK))
		return;
	if (psw_extint_disabled(vcpu))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_MCHK_MASK))
		return;
	if (psw_mchk_disabled(vcpu))
		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
	if (kvm_s390_is_stop_irq_pending(vcpu))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
	set_intercept_indicators_io(vcpu);
	set_intercept_indicators_ext(vcpu);
	set_intercept_indicators_mchk(vcpu);
	set_intercept_indicators_stop(vcpu);
}

static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc = 0;

	vcpu->stat.deliver_cputm++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
					 0, 0);
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
		vcpu->arch.sie_block->eic = EXT_IRQ_CPU_TIMER;
	} else {
		rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
				  (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	}
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc = 0;

	vcpu->stat.deliver_ckc++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
					 0, 0);
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
		vcpu->arch.sie_block->eic = EXT_IRQ_CLK_COMP;
	} else {
		rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
				  (u16 __user *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	}
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_ext_info ext;
	int rc;

	spin_lock(&li->lock);
	ext = li->irq.ext;
	clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	li->irq.ext.ext_params2 = 0;
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
		   ext.ext_params2);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_INIT,
					 0, ext.ext_params2);

	rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}

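/*
 * Write the machine check interruption code, register save areas and
 * extended save area to the guest lowcore, or, for protected guests,
 * pass the interruption data to the SIE block for injection.
 */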
static int __write_machine_check(struct kvm_vcpu *vcpu,
				 struct kvm_s390_mchk_info *mchk)
{
	unsigned long ext_sa_addr;
	unsigned long lc;
	freg_t fprs[NUM_FPRS];
	union mci mci;
	int rc;

	/*
	 * All other possible payload for a machine check (e.g. the register
	 * contents in the save area) will be handled by the ultravisor, as
	 * the hypervisor does not have the needed information for
	 * protected guests.
	 */
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		vcpu->arch.sie_block->iictl = IICTL_CODE_MCHK;
		vcpu->arch.sie_block->mcic = mchk->mcic;
		vcpu->arch.sie_block->faddr = mchk->failing_storage_address;
		vcpu->arch.sie_block->edc = mchk->ext_damage_code;
		return 0;
	}

	mci.val = mchk->mcic;
	/* take care of lazy register loading */
	save_fpu_regs();
	save_access_regs(vcpu->run->s.regs.acrs);
	if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
		save_gs_cb(current->thread.gs_cb);

	/* Extended save area */
	rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr,
			   sizeof(unsigned long));
	/* Only bits 0 through 63-LC are used for address formation */
	lc = ext_sa_addr & MCESA_LC_MASK;
	if (test_kvm_facility(vcpu->kvm, 133)) {
		switch (lc) {
		case 0:
		case 10:
			ext_sa_addr &= ~0x3ffUL;
			break;
		case 11:
			ext_sa_addr &= ~0x7ffUL;
			break;
		case 12:
			ext_sa_addr &= ~0xfffUL;
			break;
		default:
			ext_sa_addr = 0;
			break;
		}
	} else {
		ext_sa_addr &= ~0x3ffUL;
	}

	if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
		if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
				    512))
			mci.vr = 0;
	} else {
		mci.vr = 0;
	}
	if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133)
	    && (lc == 11 || lc == 12)) {
		if (write_guest_abs(vcpu, ext_sa_addr + 1024,
				    &vcpu->run->s.regs.gscb, 32))
			mci.gs = 0;
	} else {
		mci.gs = 0;
	}

	/* General interruption information */
	rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);

	/* Register-save areas */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
	} else {
		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
			     vcpu->run->s.regs.gprs, 128);
	rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
			   (u32 __user *) __LC_FP_CREG_SAVE_AREA);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
			   (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
	rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu),
			   (u64 __user *) __LC_CPU_TIMER_SAVE_AREA);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8,
			   (u64 __user *) __LC_CLOCK_COMP_SAVE_AREA);
	rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA,
			     &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA,
			     &vcpu->arch.sie_block->gcr, 128);

	/* Extended interruption information */
	rc |= put_guest_lc(vcpu, mchk->ext_damage_code,
			   (u32 __user *) __LC_EXT_DAMAGE_CODE);
	rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout,
			     sizeof(mchk->fixed_logout));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info mchk = {};
	int deliver = 0;
	int rc = 0;

	spin_lock(&fi->lock);
	spin_lock(&li->lock);
	if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
	    test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
		/*
		 * If there was an exigent machine check pending, then any
		 * repressible machine checks that might have been pending
		 * are indicated along with it, so always clear bits for
		 * repressible and exigent interrupts
		 */
		mchk = li->irq.mchk;
		clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
		clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
		memset(&li->irq.mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	/*
	 * We indicate floating repressible conditions along with
	 * other pending conditions. Channel Report Pending and Channel
	 * Subsystem damage are the only two and are indicated by
	 * bits in mcic and masked in cr14.
	 */
	if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		mchk.mcic |= fi->mchk.mcic;
		mchk.cr14 |= fi->mchk.cr14;
		memset(&fi->mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	spin_unlock(&li->lock);
	spin_unlock(&fi->lock);

	if (deliver) {
		VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
			   mchk.mcic);
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_MCHK,
						 mchk.cr14, mchk.mcic);
		vcpu->stat.deliver_machine_check++;
		rc = __write_machine_check(vcpu, &mchk);
	}
	return rc;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc = 0;

	VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
	vcpu->stat.deliver_restart_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		vcpu->arch.sie_block->iictl = IICTL_CODE_RESTART;
	} else {
		rc = write_guest_lc(vcpu,
				    offsetof(struct lowcore, restart_old_psw),
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
				    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	}
	clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info prefix;

	spin_lock(&li->lock);
	prefix = li->irq.prefix;
	li->irq.prefix.address = 0;
	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	spin_unlock(&li->lock);

	vcpu->stat.deliver_prefix_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_SIGP_SET_PREFIX,
					 prefix.address, 0);

	kvm_s390_set_prefix(vcpu, prefix.address);
	return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;
	int cpu_addr;

	spin_lock(&li->lock);
	cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	clear_bit(cpu_addr, li->sigp_emerg_pending);
	if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
		clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
	vcpu->stat.deliver_emergency_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
					 cpu_addr, 0);
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
		vcpu->arch.sie_block->eic = EXT_IRQ_EMERGENCY_SIG;
		vcpu->arch.sie_block->extcpuaddr = cpu_addr;
		return 0;
	}

	rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
			  (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info extcall;
	int rc;

	spin_lock(&li->lock);
	extcall = li->irq.extcall;
	li->irq.extcall.code = 0;
	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
	vcpu->stat.deliver_external_call++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_EXTERNAL_CALL,
					 extcall.code, 0);
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
		vcpu->arch.sie_block->eic = EXT_IRQ_EXTERNAL_CALL;
		vcpu->arch.sie_block->extcpuaddr = extcall.code;
		return 0;
	}

	rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
			  (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

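/*
 * For protected guests, only specification and operand exceptions can be
 * injected through the SIE block; all other codes are rejected here.
 */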
static int __deliver_prog_pv(struct kvm_vcpu *vcpu, u16 code)
{
	switch (code) {
	case PGM_SPECIFICATION:
		vcpu->arch.sie_block->iictl = IICTL_CODE_SPECIFICATION;
		break;
	case PGM_OPERAND:
		vcpu->arch.sie_block->iictl = IICTL_CODE_OPERAND;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_pgm_info pgm_info;
	int rc = 0, nullifying = false;
	u16 ilen;

	spin_lock(&li->lock);
	pgm_info = li->irq.pgm;
	clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
	memset(&li->irq.pgm, 0, sizeof(pgm_info));
	spin_unlock(&li->lock);

	ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
	VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
		   pgm_info.code, ilen);
	vcpu->stat.deliver_program++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
					 pgm_info.code, 0);

	/* PER is handled by the ultravisor */
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		return __deliver_prog_pv(vcpu, pgm_info.code & ~PGM_PER);

	switch (pgm_info.code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
		nullifying = true;
		fallthrough;
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
				  (u16 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_STACK_FULL:
	case PGM_STACK_EMPTY:
	case PGM_STACK_SPECIFICATION:
	case PGM_STACK_TYPE:
	case PGM_STACK_OPERATION:
	case PGM_TRACE_TABEL:
	case PGM_CRYPTO_OPERATION:
		nullifying = true;
		break;
	}

	if (pgm_info.code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info.per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info.per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
		kvm_s390_rewind_psw(vcpu, ilen);

	/* bit 1+2 of the target are the ilc, so we can directly use ilen */
	rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
			   (u64 *) __LC_LAST_BREAK);
	rc |= put_guest_lc(vcpu, pgm_info.code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

#define SCCB_MASK 0xFFFFFFF8
#define SCCB_EVENT_PENDING 0x3

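/*
 * Deliver a service signal external interrupt with the external
 * interruption parameter @parm to the guest.
 */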
static int write_sclp(struct kvm_vcpu *vcpu, u32 parm)
{
	int rc;

	if (kvm_s390_pv_cpu_get_handle(vcpu)) {
		vcpu->arch.sie_block->iictl = IICTL_CODE_EXT;
		vcpu->arch.sie_block->eic = EXT_IRQ_SERVICE_SIG;
		vcpu->arch.sie_block->eiparams = parm;
		return 0;
	}

	rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, parm,
			   (u32 *)__LC_EXT_PARAMS);

	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_ext_info ext;

	spin_lock(&fi->lock);
	if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs) ||
	    !(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
		spin_unlock(&fi->lock);
		return 0;
	}
	ext = fi->srv_signal;
	memset(&fi->srv_signal, 0, sizeof(ext));
	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
	clear_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		set_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs);
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
		   ext.ext_params);
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
					 ext.ext_params, 0);

	return write_sclp(vcpu, ext.ext_params);
}

static int __must_check __deliver_service_ev(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_ext_info ext;

	spin_lock(&fi->lock);
	if (!(test_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs))) {
		spin_unlock(&fi->lock);
		return 0;
	}
	ext = fi->srv_signal;
	/* only clear the event bit */
	fi->srv_signal.ext_params &= ~SCCB_EVENT_PENDING;
	clear_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sclp parameter event");
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
					 ext.ext_params, 0);

	return write_sclp(vcpu, SCCB_EVENT_PENDING);
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_PFAULT] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
		clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_INT_PFAULT_DONE, 0,
						 inti->ext.ext_params2);
		VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
			   inti->ext.ext_params2);

		rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				  (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, PFAULT_DONE,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		VCPU_EVENT(vcpu, 4,
			   "deliver: virtio parm: 0x%x,parm64: 0x%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 inti->type,
						 inti->ext.ext_params,
						 inti->ext.ext_params2);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
		clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				  (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}

1139*4882a593Smuzhiyun static int __do_deliver_io(struct kvm_vcpu *vcpu, struct kvm_s390_io_info *io)
1140*4882a593Smuzhiyun {
1141*4882a593Smuzhiyun int rc;
1142*4882a593Smuzhiyun
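	/*
	 * For protected guests the lowcore is not accessible to KVM, so the
	 * I/O interruption parameters are handed over via the SIE
	 * interception injection controls instead of being written below.
	 */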
1143*4882a593Smuzhiyun if (kvm_s390_pv_cpu_is_protected(vcpu)) {
1144*4882a593Smuzhiyun vcpu->arch.sie_block->iictl = IICTL_CODE_IO;
1145*4882a593Smuzhiyun vcpu->arch.sie_block->subchannel_id = io->subchannel_id;
1146*4882a593Smuzhiyun vcpu->arch.sie_block->subchannel_nr = io->subchannel_nr;
1147*4882a593Smuzhiyun vcpu->arch.sie_block->io_int_parm = io->io_int_parm;
1148*4882a593Smuzhiyun vcpu->arch.sie_block->io_int_word = io->io_int_word;
1149*4882a593Smuzhiyun return 0;
1150*4882a593Smuzhiyun }
1151*4882a593Smuzhiyun
1152*4882a593Smuzhiyun rc = put_guest_lc(vcpu, io->subchannel_id, (u16 *)__LC_SUBCHANNEL_ID);
1153*4882a593Smuzhiyun rc |= put_guest_lc(vcpu, io->subchannel_nr, (u16 *)__LC_SUBCHANNEL_NR);
1154*4882a593Smuzhiyun rc |= put_guest_lc(vcpu, io->io_int_parm, (u32 *)__LC_IO_INT_PARM);
1155*4882a593Smuzhiyun rc |= put_guest_lc(vcpu, io->io_int_word, (u32 *)__LC_IO_INT_WORD);
1156*4882a593Smuzhiyun rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
1157*4882a593Smuzhiyun &vcpu->arch.sie_block->gpsw,
1158*4882a593Smuzhiyun sizeof(psw_t));
1159*4882a593Smuzhiyun rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
1160*4882a593Smuzhiyun &vcpu->arch.sie_block->gpsw,
1161*4882a593Smuzhiyun sizeof(psw_t));
1162*4882a593Smuzhiyun return rc ? -EFAULT : 0;
1163*4882a593Smuzhiyun }
1164*4882a593Smuzhiyun
1165*4882a593Smuzhiyun static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
1166*4882a593Smuzhiyun unsigned long irq_type)
1167*4882a593Smuzhiyun {
1168*4882a593Smuzhiyun struct list_head *isc_list;
1169*4882a593Smuzhiyun struct kvm_s390_float_interrupt *fi;
1170*4882a593Smuzhiyun struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
1171*4882a593Smuzhiyun struct kvm_s390_interrupt_info *inti = NULL;
1172*4882a593Smuzhiyun struct kvm_s390_io_info io;
1173*4882a593Smuzhiyun u32 isc;
1174*4882a593Smuzhiyun int rc = 0;
1175*4882a593Smuzhiyun
1176*4882a593Smuzhiyun fi = &vcpu->kvm->arch.float_int;
1177*4882a593Smuzhiyun
1178*4882a593Smuzhiyun spin_lock(&fi->lock);
1179*4882a593Smuzhiyun isc = irq_type_to_isc(irq_type);
1180*4882a593Smuzhiyun isc_list = &fi->lists[isc];
1181*4882a593Smuzhiyun inti = list_first_entry_or_null(isc_list,
1182*4882a593Smuzhiyun struct kvm_s390_interrupt_info,
1183*4882a593Smuzhiyun list);
1184*4882a593Smuzhiyun if (inti) {
1185*4882a593Smuzhiyun if (inti->type & KVM_S390_INT_IO_AI_MASK)
1186*4882a593Smuzhiyun VCPU_EVENT(vcpu, 4, "%s", "deliver: I/O (AI)");
1187*4882a593Smuzhiyun else
1188*4882a593Smuzhiyun VCPU_EVENT(vcpu, 4, "deliver: I/O %x ss %x schid %04x",
1189*4882a593Smuzhiyun inti->io.subchannel_id >> 8,
1190*4882a593Smuzhiyun inti->io.subchannel_id >> 1 & 0x3,
1191*4882a593Smuzhiyun inti->io.subchannel_nr);
1192*4882a593Smuzhiyun
1193*4882a593Smuzhiyun vcpu->stat.deliver_io++;
1194*4882a593Smuzhiyun trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
1195*4882a593Smuzhiyun inti->type,
1196*4882a593Smuzhiyun ((__u32)inti->io.subchannel_id << 16) |
1197*4882a593Smuzhiyun inti->io.subchannel_nr,
1198*4882a593Smuzhiyun ((__u64)inti->io.io_int_parm << 32) |
1199*4882a593Smuzhiyun inti->io.io_int_word);
1200*4882a593Smuzhiyun list_del(&inti->list);
1201*4882a593Smuzhiyun fi->counters[FIRQ_CNTR_IO] -= 1;
1202*4882a593Smuzhiyun }
1203*4882a593Smuzhiyun if (list_empty(isc_list))
1204*4882a593Smuzhiyun clear_bit(irq_type, &fi->pending_irqs);
1205*4882a593Smuzhiyun spin_unlock(&fi->lock);
1206*4882a593Smuzhiyun
1207*4882a593Smuzhiyun if (inti) {
1208*4882a593Smuzhiyun rc = __do_deliver_io(vcpu, &(inti->io));
1209*4882a593Smuzhiyun kfree(inti);
1210*4882a593Smuzhiyun goto out;
1211*4882a593Smuzhiyun }
1212*4882a593Smuzhiyun
1213*4882a593Smuzhiyun if (gi->origin && gisa_tac_ipm_gisc(gi->origin, isc)) {
1214*4882a593Smuzhiyun /*
1215*4882a593Smuzhiyun * If an adapter interrupt was not delivered while in SIE
1216*4882a593Smuzhiyun * context, KVM handles the delivery here.
1217*4882a593Smuzhiyun */
1218*4882a593Smuzhiyun VCPU_EVENT(vcpu, 4, "%s isc %u", "deliver: I/O (AI/gisa)", isc);
1219*4882a593Smuzhiyun memset(&io, 0, sizeof(io));
1220*4882a593Smuzhiyun io.io_int_word = isc_to_int_word(isc);
1221*4882a593Smuzhiyun vcpu->stat.deliver_io++;
1222*4882a593Smuzhiyun trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
1223*4882a593Smuzhiyun KVM_S390_INT_IO(1, 0, 0, 0),
1224*4882a593Smuzhiyun ((__u32)io.subchannel_id << 16) |
1225*4882a593Smuzhiyun io.subchannel_nr,
1226*4882a593Smuzhiyun ((__u64)io.io_int_parm << 32) |
1227*4882a593Smuzhiyun io.io_int_word);
1228*4882a593Smuzhiyun rc = __do_deliver_io(vcpu, &io);
1229*4882a593Smuzhiyun }
1230*4882a593Smuzhiyun out:
1231*4882a593Smuzhiyun return rc;
1232*4882a593Smuzhiyun }
1233*4882a593Smuzhiyun
1234*4882a593Smuzhiyun /* Check whether an external call is pending (deliverable or not) */
1235*4882a593Smuzhiyun int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
1236*4882a593Smuzhiyun {
1237*4882a593Smuzhiyun struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1238*4882a593Smuzhiyun
1239*4882a593Smuzhiyun if (!sclp.has_sigpif)
1240*4882a593Smuzhiyun return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
1241*4882a593Smuzhiyun
1242*4882a593Smuzhiyun return sca_ext_call_pending(vcpu, NULL);
1243*4882a593Smuzhiyun }
1244*4882a593Smuzhiyun
1245*4882a593Smuzhiyun int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
1246*4882a593Smuzhiyun {
1247*4882a593Smuzhiyun if (deliverable_irqs(vcpu))
1248*4882a593Smuzhiyun return 1;
1249*4882a593Smuzhiyun
1250*4882a593Smuzhiyun if (kvm_cpu_has_pending_timer(vcpu))
1251*4882a593Smuzhiyun return 1;
1252*4882a593Smuzhiyun
1253*4882a593Smuzhiyun /* external call pending and deliverable */
1254*4882a593Smuzhiyun if (kvm_s390_ext_call_pending(vcpu) &&
1255*4882a593Smuzhiyun !psw_extint_disabled(vcpu) &&
1256*4882a593Smuzhiyun (vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
1257*4882a593Smuzhiyun return 1;
1258*4882a593Smuzhiyun
1259*4882a593Smuzhiyun if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
1260*4882a593Smuzhiyun return 1;
1261*4882a593Smuzhiyun return 0;
1262*4882a593Smuzhiyun }
1263*4882a593Smuzhiyun
1264*4882a593Smuzhiyun int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
1265*4882a593Smuzhiyun {
1266*4882a593Smuzhiyun return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
1267*4882a593Smuzhiyun }
1268*4882a593Smuzhiyun
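/*
 * Compute how long the vcpu may sleep: the minimum of the time until the
 * clock comparator fires and the remaining CPU timer, considering only the
 * sources whose interrupts are enabled. Illustrative example (not from the
 * original source): with 5 ms left until the ckc and a CPU timer of 2 ms,
 * both enabled, the result is 2 ms; 0 means there is nothing to wait for.
 */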
1269*4882a593Smuzhiyun static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
1270*4882a593Smuzhiyun {
1271*4882a593Smuzhiyun const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
1272*4882a593Smuzhiyun const u64 ckc = vcpu->arch.sie_block->ckc;
1273*4882a593Smuzhiyun u64 cputm, sltime = 0;
1274*4882a593Smuzhiyun
1275*4882a593Smuzhiyun if (ckc_interrupts_enabled(vcpu)) {
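		/*
		 * With the clock-comparator sign control set in CR0, the
		 * ckc/TOD comparison is signed; otherwise it is unsigned.
		 */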
1276*4882a593Smuzhiyun if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
1277*4882a593Smuzhiyun if ((s64)now < (s64)ckc)
1278*4882a593Smuzhiyun sltime = tod_to_ns((s64)ckc - (s64)now);
1279*4882a593Smuzhiyun } else if (now < ckc) {
1280*4882a593Smuzhiyun sltime = tod_to_ns(ckc - now);
1281*4882a593Smuzhiyun }
1282*4882a593Smuzhiyun /* already expired */
1283*4882a593Smuzhiyun if (!sltime)
1284*4882a593Smuzhiyun return 0;
1285*4882a593Smuzhiyun if (cpu_timer_interrupts_enabled(vcpu)) {
1286*4882a593Smuzhiyun cputm = kvm_s390_get_cpu_timer(vcpu);
1287*4882a593Smuzhiyun /* already expired? */
1288*4882a593Smuzhiyun if (cputm >> 63)
1289*4882a593Smuzhiyun return 0;
1290*4882a593Smuzhiyun return min(sltime, tod_to_ns(cputm));
1291*4882a593Smuzhiyun }
1292*4882a593Smuzhiyun } else if (cpu_timer_interrupts_enabled(vcpu)) {
1293*4882a593Smuzhiyun sltime = kvm_s390_get_cpu_timer(vcpu);
1294*4882a593Smuzhiyun /* already expired? */
1295*4882a593Smuzhiyun if (sltime >> 63)
1296*4882a593Smuzhiyun return 0;
1297*4882a593Smuzhiyun }
1298*4882a593Smuzhiyun return sltime;
1299*4882a593Smuzhiyun }
1300*4882a593Smuzhiyun
1301*4882a593Smuzhiyun int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
1302*4882a593Smuzhiyun {
1303*4882a593Smuzhiyun struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
1304*4882a593Smuzhiyun u64 sltime;
1305*4882a593Smuzhiyun
1306*4882a593Smuzhiyun vcpu->stat.exit_wait_state++;
1307*4882a593Smuzhiyun
1308*4882a593Smuzhiyun /* fast path */
1309*4882a593Smuzhiyun if (kvm_arch_vcpu_runnable(vcpu))
1310*4882a593Smuzhiyun return 0;
1311*4882a593Smuzhiyun
1312*4882a593Smuzhiyun if (psw_interrupts_disabled(vcpu)) {
1313*4882a593Smuzhiyun VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
1314*4882a593Smuzhiyun return -EOPNOTSUPP; /* disabled wait */
1315*4882a593Smuzhiyun }
1316*4882a593Smuzhiyun
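	/*
	 * A pending GISA adapter interrupt that is enabled by the isc mask
	 * in the guest's cr6 makes the vcpu runnable right away.
	 */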
1317*4882a593Smuzhiyun if (gi->origin &&
1318*4882a593Smuzhiyun (gisa_get_ipm_or_restore_iam(gi) &
1319*4882a593Smuzhiyun vcpu->arch.sie_block->gcr[6] >> 24))
1320*4882a593Smuzhiyun return 0;
1321*4882a593Smuzhiyun
1322*4882a593Smuzhiyun if (!ckc_interrupts_enabled(vcpu) &&
1323*4882a593Smuzhiyun !cpu_timer_interrupts_enabled(vcpu)) {
1324*4882a593Smuzhiyun VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
1325*4882a593Smuzhiyun __set_cpu_idle(vcpu);
1326*4882a593Smuzhiyun goto no_timer;
1327*4882a593Smuzhiyun }
1328*4882a593Smuzhiyun
1329*4882a593Smuzhiyun sltime = __calculate_sltime(vcpu);
1330*4882a593Smuzhiyun if (!sltime)
1331*4882a593Smuzhiyun return 0;
1332*4882a593Smuzhiyun
1333*4882a593Smuzhiyun __set_cpu_idle(vcpu);
1334*4882a593Smuzhiyun hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
1335*4882a593Smuzhiyun VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
1336*4882a593Smuzhiyun no_timer:
1337*4882a593Smuzhiyun srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1338*4882a593Smuzhiyun kvm_vcpu_block(vcpu);
1339*4882a593Smuzhiyun __unset_cpu_idle(vcpu);
1340*4882a593Smuzhiyun vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1341*4882a593Smuzhiyun
1342*4882a593Smuzhiyun hrtimer_cancel(&vcpu->arch.ckc_timer);
1343*4882a593Smuzhiyun return 0;
1344*4882a593Smuzhiyun }
1345*4882a593Smuzhiyun
1346*4882a593Smuzhiyun void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
1347*4882a593Smuzhiyun {
1348*4882a593Smuzhiyun vcpu->valid_wakeup = true;
1349*4882a593Smuzhiyun kvm_vcpu_wake_up(vcpu);
1350*4882a593Smuzhiyun
1351*4882a593Smuzhiyun /*
1352*4882a593Smuzhiyun * The VCPU might not be sleeping but rather executing VSIE. Let's
1353*4882a593Smuzhiyun * kick it, so it leaves the SIE to process the request.
1354*4882a593Smuzhiyun */
1355*4882a593Smuzhiyun kvm_s390_vsie_kick(vcpu);
1356*4882a593Smuzhiyun }
1357*4882a593Smuzhiyun
1358*4882a593Smuzhiyun enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
1359*4882a593Smuzhiyun {
1360*4882a593Smuzhiyun struct kvm_vcpu *vcpu;
1361*4882a593Smuzhiyun u64 sltime;
1362*4882a593Smuzhiyun
1363*4882a593Smuzhiyun vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
1364*4882a593Smuzhiyun sltime = __calculate_sltime(vcpu);
1365*4882a593Smuzhiyun
1366*4882a593Smuzhiyun /*
1367*4882a593Smuzhiyun * If the monotonic clock runs faster than the tod clock we might be
1368*4882a593Smuzhiyun * woken up too early and have to go back to sleep to avoid deadlocks.
1369*4882a593Smuzhiyun */
1370*4882a593Smuzhiyun if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
1371*4882a593Smuzhiyun return HRTIMER_RESTART;
1372*4882a593Smuzhiyun kvm_s390_vcpu_wakeup(vcpu);
1373*4882a593Smuzhiyun return HRTIMER_NORESTART;
1374*4882a593Smuzhiyun }
1375*4882a593Smuzhiyun
1376*4882a593Smuzhiyun void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
1377*4882a593Smuzhiyun {
1378*4882a593Smuzhiyun struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1379*4882a593Smuzhiyun
1380*4882a593Smuzhiyun spin_lock(&li->lock);
1381*4882a593Smuzhiyun li->pending_irqs = 0;
1382*4882a593Smuzhiyun bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
1383*4882a593Smuzhiyun memset(&li->irq, 0, sizeof(li->irq));
1384*4882a593Smuzhiyun spin_unlock(&li->lock);
1385*4882a593Smuzhiyun
1386*4882a593Smuzhiyun sca_clear_ext_call(vcpu);
1387*4882a593Smuzhiyun }
1388*4882a593Smuzhiyun
1389*4882a593Smuzhiyun int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
1390*4882a593Smuzhiyun {
1391*4882a593Smuzhiyun struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1392*4882a593Smuzhiyun int rc = 0;
1393*4882a593Smuzhiyun unsigned long irq_type;
1394*4882a593Smuzhiyun unsigned long irqs;
1395*4882a593Smuzhiyun
1396*4882a593Smuzhiyun __reset_intercept_indicators(vcpu);
1397*4882a593Smuzhiyun
1398*4882a593Smuzhiyun /* pending ckc conditions might have been invalidated */
1399*4882a593Smuzhiyun clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
1400*4882a593Smuzhiyun if (ckc_irq_pending(vcpu))
1401*4882a593Smuzhiyun set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
1402*4882a593Smuzhiyun
1403*4882a593Smuzhiyun /* pending cpu timer conditions might have been invalidated */
1404*4882a593Smuzhiyun clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1405*4882a593Smuzhiyun if (cpu_timer_irq_pending(vcpu))
1406*4882a593Smuzhiyun set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1407*4882a593Smuzhiyun
1408*4882a593Smuzhiyun while ((irqs = deliverable_irqs(vcpu)) && !rc) {
1409*4882a593Smuzhiyun /* bits are in the reverse order of interrupt priority */
1410*4882a593Smuzhiyun irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT);
1411*4882a593Smuzhiyun switch (irq_type) {
1412*4882a593Smuzhiyun case IRQ_PEND_IO_ISC_0:
1413*4882a593Smuzhiyun case IRQ_PEND_IO_ISC_1:
1414*4882a593Smuzhiyun case IRQ_PEND_IO_ISC_2:
1415*4882a593Smuzhiyun case IRQ_PEND_IO_ISC_3:
1416*4882a593Smuzhiyun case IRQ_PEND_IO_ISC_4:
1417*4882a593Smuzhiyun case IRQ_PEND_IO_ISC_5:
1418*4882a593Smuzhiyun case IRQ_PEND_IO_ISC_6:
1419*4882a593Smuzhiyun case IRQ_PEND_IO_ISC_7:
1420*4882a593Smuzhiyun rc = __deliver_io(vcpu, irq_type);
1421*4882a593Smuzhiyun break;
1422*4882a593Smuzhiyun case IRQ_PEND_MCHK_EX:
1423*4882a593Smuzhiyun case IRQ_PEND_MCHK_REP:
1424*4882a593Smuzhiyun rc = __deliver_machine_check(vcpu);
1425*4882a593Smuzhiyun break;
1426*4882a593Smuzhiyun case IRQ_PEND_PROG:
1427*4882a593Smuzhiyun rc = __deliver_prog(vcpu);
1428*4882a593Smuzhiyun break;
1429*4882a593Smuzhiyun case IRQ_PEND_EXT_EMERGENCY:
1430*4882a593Smuzhiyun rc = __deliver_emergency_signal(vcpu);
1431*4882a593Smuzhiyun break;
1432*4882a593Smuzhiyun case IRQ_PEND_EXT_EXTERNAL:
1433*4882a593Smuzhiyun rc = __deliver_external_call(vcpu);
1434*4882a593Smuzhiyun break;
1435*4882a593Smuzhiyun case IRQ_PEND_EXT_CLOCK_COMP:
1436*4882a593Smuzhiyun rc = __deliver_ckc(vcpu);
1437*4882a593Smuzhiyun break;
1438*4882a593Smuzhiyun case IRQ_PEND_EXT_CPU_TIMER:
1439*4882a593Smuzhiyun rc = __deliver_cpu_timer(vcpu);
1440*4882a593Smuzhiyun break;
1441*4882a593Smuzhiyun case IRQ_PEND_RESTART:
1442*4882a593Smuzhiyun rc = __deliver_restart(vcpu);
1443*4882a593Smuzhiyun break;
1444*4882a593Smuzhiyun case IRQ_PEND_SET_PREFIX:
1445*4882a593Smuzhiyun rc = __deliver_set_prefix(vcpu);
1446*4882a593Smuzhiyun break;
1447*4882a593Smuzhiyun case IRQ_PEND_PFAULT_INIT:
1448*4882a593Smuzhiyun rc = __deliver_pfault_init(vcpu);
1449*4882a593Smuzhiyun break;
1450*4882a593Smuzhiyun case IRQ_PEND_EXT_SERVICE:
1451*4882a593Smuzhiyun rc = __deliver_service(vcpu);
1452*4882a593Smuzhiyun break;
1453*4882a593Smuzhiyun case IRQ_PEND_EXT_SERVICE_EV:
1454*4882a593Smuzhiyun rc = __deliver_service_ev(vcpu);
1455*4882a593Smuzhiyun break;
1456*4882a593Smuzhiyun case IRQ_PEND_PFAULT_DONE:
1457*4882a593Smuzhiyun rc = __deliver_pfault_done(vcpu);
1458*4882a593Smuzhiyun break;
1459*4882a593Smuzhiyun case IRQ_PEND_VIRTIO:
1460*4882a593Smuzhiyun rc = __deliver_virtio(vcpu);
1461*4882a593Smuzhiyun break;
1462*4882a593Smuzhiyun default:
1463*4882a593Smuzhiyun WARN_ONCE(1, "Unknown pending irq type %ld", irq_type);
1464*4882a593Smuzhiyun clear_bit(irq_type, &li->pending_irqs);
1465*4882a593Smuzhiyun }
1466*4882a593Smuzhiyun }
1467*4882a593Smuzhiyun
1468*4882a593Smuzhiyun set_intercept_indicators(vcpu);
1469*4882a593Smuzhiyun
1470*4882a593Smuzhiyun return rc;
1471*4882a593Smuzhiyun }
1472*4882a593Smuzhiyun
1473*4882a593Smuzhiyun static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1474*4882a593Smuzhiyun {
1475*4882a593Smuzhiyun struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1476*4882a593Smuzhiyun
1477*4882a593Smuzhiyun vcpu->stat.inject_program++;
1478*4882a593Smuzhiyun VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
1479*4882a593Smuzhiyun trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
1480*4882a593Smuzhiyun irq->u.pgm.code, 0);
1481*4882a593Smuzhiyun
1482*4882a593Smuzhiyun if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
1483*4882a593Smuzhiyun /* auto detection if no valid ILC was given */
1484*4882a593Smuzhiyun irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
1485*4882a593Smuzhiyun irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
1486*4882a593Smuzhiyun irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
1487*4882a593Smuzhiyun }
1488*4882a593Smuzhiyun
1489*4882a593Smuzhiyun if (irq->u.pgm.code == PGM_PER) {
1490*4882a593Smuzhiyun li->irq.pgm.code |= PGM_PER;
1491*4882a593Smuzhiyun li->irq.pgm.flags = irq->u.pgm.flags;
1492*4882a593Smuzhiyun /* only modify PER related information */
1493*4882a593Smuzhiyun li->irq.pgm.per_address = irq->u.pgm.per_address;
1494*4882a593Smuzhiyun li->irq.pgm.per_code = irq->u.pgm.per_code;
1495*4882a593Smuzhiyun li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
1496*4882a593Smuzhiyun li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
1497*4882a593Smuzhiyun } else if (!(irq->u.pgm.code & PGM_PER)) {
1498*4882a593Smuzhiyun li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
1499*4882a593Smuzhiyun irq->u.pgm.code;
1500*4882a593Smuzhiyun li->irq.pgm.flags = irq->u.pgm.flags;
1501*4882a593Smuzhiyun /* only modify non-PER information */
1502*4882a593Smuzhiyun li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
1503*4882a593Smuzhiyun li->irq.pgm.mon_code = irq->u.pgm.mon_code;
1504*4882a593Smuzhiyun li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
1505*4882a593Smuzhiyun li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
1506*4882a593Smuzhiyun li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
1507*4882a593Smuzhiyun li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
1508*4882a593Smuzhiyun } else {
1509*4882a593Smuzhiyun li->irq.pgm = irq->u.pgm;
1510*4882a593Smuzhiyun }
1511*4882a593Smuzhiyun set_bit(IRQ_PEND_PROG, &li->pending_irqs);
1512*4882a593Smuzhiyun return 0;
1513*4882a593Smuzhiyun }
1514*4882a593Smuzhiyun
1515*4882a593Smuzhiyun static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1516*4882a593Smuzhiyun {
1517*4882a593Smuzhiyun struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1518*4882a593Smuzhiyun
1519*4882a593Smuzhiyun vcpu->stat.inject_pfault_init++;
1520*4882a593Smuzhiyun VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
1521*4882a593Smuzhiyun irq->u.ext.ext_params2);
1522*4882a593Smuzhiyun trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
1523*4882a593Smuzhiyun irq->u.ext.ext_params,
1524*4882a593Smuzhiyun irq->u.ext.ext_params2);
1525*4882a593Smuzhiyun
1526*4882a593Smuzhiyun li->irq.ext = irq->u.ext;
1527*4882a593Smuzhiyun set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
1528*4882a593Smuzhiyun kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1529*4882a593Smuzhiyun return 0;
1530*4882a593Smuzhiyun }
1531*4882a593Smuzhiyun
1532*4882a593Smuzhiyun static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1533*4882a593Smuzhiyun {
1534*4882a593Smuzhiyun struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1535*4882a593Smuzhiyun struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
1536*4882a593Smuzhiyun uint16_t src_id = irq->u.extcall.code;
1537*4882a593Smuzhiyun
1538*4882a593Smuzhiyun vcpu->stat.inject_external_call++;
1539*4882a593Smuzhiyun VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
1540*4882a593Smuzhiyun src_id);
1541*4882a593Smuzhiyun trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
1542*4882a593Smuzhiyun src_id, 0);
1543*4882a593Smuzhiyun
1544*4882a593Smuzhiyun /* sending vcpu invalid */
1545*4882a593Smuzhiyun if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
1546*4882a593Smuzhiyun return -EINVAL;
1547*4882a593Smuzhiyun
1548*4882a593Smuzhiyun if (sclp.has_sigpif && !kvm_s390_pv_cpu_get_handle(vcpu))
1549*4882a593Smuzhiyun return sca_inject_ext_call(vcpu, src_id);
1550*4882a593Smuzhiyun
1551*4882a593Smuzhiyun if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
1552*4882a593Smuzhiyun return -EBUSY;
1553*4882a593Smuzhiyun *extcall = irq->u.extcall;
1554*4882a593Smuzhiyun kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1555*4882a593Smuzhiyun return 0;
1556*4882a593Smuzhiyun }
1557*4882a593Smuzhiyun
1558*4882a593Smuzhiyun static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1559*4882a593Smuzhiyun {
1560*4882a593Smuzhiyun struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1561*4882a593Smuzhiyun struct kvm_s390_prefix_info *prefix = &li->irq.prefix;
1562*4882a593Smuzhiyun
1563*4882a593Smuzhiyun vcpu->stat.inject_set_prefix++;
1564*4882a593Smuzhiyun VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
1565*4882a593Smuzhiyun irq->u.prefix.address);
1566*4882a593Smuzhiyun trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
1567*4882a593Smuzhiyun irq->u.prefix.address, 0);
1568*4882a593Smuzhiyun
1569*4882a593Smuzhiyun if (!is_vcpu_stopped(vcpu))
1570*4882a593Smuzhiyun return -EBUSY;
1571*4882a593Smuzhiyun
1572*4882a593Smuzhiyun *prefix = irq->u.prefix;
1573*4882a593Smuzhiyun set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
1574*4882a593Smuzhiyun return 0;
1575*4882a593Smuzhiyun }
1576*4882a593Smuzhiyun
1577*4882a593Smuzhiyun #define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
1578*4882a593Smuzhiyun static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1579*4882a593Smuzhiyun {
1580*4882a593Smuzhiyun struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1581*4882a593Smuzhiyun struct kvm_s390_stop_info *stop = &li->irq.stop;
1582*4882a593Smuzhiyun int rc = 0;
1583*4882a593Smuzhiyun
1584*4882a593Smuzhiyun vcpu->stat.inject_stop_signal++;
1585*4882a593Smuzhiyun trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);
1586*4882a593Smuzhiyun
1587*4882a593Smuzhiyun if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
1588*4882a593Smuzhiyun return -EINVAL;
1589*4882a593Smuzhiyun
1590*4882a593Smuzhiyun if (is_vcpu_stopped(vcpu)) {
1591*4882a593Smuzhiyun if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
1592*4882a593Smuzhiyun rc = kvm_s390_store_status_unloaded(vcpu,
1593*4882a593Smuzhiyun KVM_S390_STORE_STATUS_NOADDR);
1594*4882a593Smuzhiyun return rc;
1595*4882a593Smuzhiyun }
1596*4882a593Smuzhiyun
1597*4882a593Smuzhiyun if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
1598*4882a593Smuzhiyun return -EBUSY;
1599*4882a593Smuzhiyun stop->flags = irq->u.stop.flags;
1600*4882a593Smuzhiyun kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
1601*4882a593Smuzhiyun return 0;
1602*4882a593Smuzhiyun }
1603*4882a593Smuzhiyun
1604*4882a593Smuzhiyun static int __inject_sigp_restart(struct kvm_vcpu *vcpu)
1605*4882a593Smuzhiyun {
1606*4882a593Smuzhiyun struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1607*4882a593Smuzhiyun
1608*4882a593Smuzhiyun vcpu->stat.inject_restart++;
1609*4882a593Smuzhiyun VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
1610*4882a593Smuzhiyun trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
1611*4882a593Smuzhiyun
1612*4882a593Smuzhiyun set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
1613*4882a593Smuzhiyun return 0;
1614*4882a593Smuzhiyun }
1615*4882a593Smuzhiyun
1616*4882a593Smuzhiyun static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
1617*4882a593Smuzhiyun struct kvm_s390_irq *irq)
1618*4882a593Smuzhiyun {
1619*4882a593Smuzhiyun struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1620*4882a593Smuzhiyun
1621*4882a593Smuzhiyun vcpu->stat.inject_emergency_signal++;
1622*4882a593Smuzhiyun VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
1623*4882a593Smuzhiyun irq->u.emerg.code);
1624*4882a593Smuzhiyun trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
1625*4882a593Smuzhiyun irq->u.emerg.code, 0);
1626*4882a593Smuzhiyun
1627*4882a593Smuzhiyun /* sending vcpu invalid */
1628*4882a593Smuzhiyun if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
1629*4882a593Smuzhiyun return -EINVAL;
1630*4882a593Smuzhiyun
1631*4882a593Smuzhiyun set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
1632*4882a593Smuzhiyun set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
1633*4882a593Smuzhiyun kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1634*4882a593Smuzhiyun return 0;
1635*4882a593Smuzhiyun }
1636*4882a593Smuzhiyun
1637*4882a593Smuzhiyun static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1638*4882a593Smuzhiyun {
1639*4882a593Smuzhiyun struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1640*4882a593Smuzhiyun struct kvm_s390_mchk_info *mchk = &li->irq.mchk;
1641*4882a593Smuzhiyun
1642*4882a593Smuzhiyun vcpu->stat.inject_mchk++;
1643*4882a593Smuzhiyun VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
1644*4882a593Smuzhiyun irq->u.mchk.mcic);
1645*4882a593Smuzhiyun trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
1646*4882a593Smuzhiyun irq->u.mchk.mcic);
1647*4882a593Smuzhiyun
1648*4882a593Smuzhiyun /*
1649*4882a593Smuzhiyun * Because repressible machine checks can be indicated along with
1650*4882a593Smuzhiyun * exigent machine checks (PoP, Chapter 11, Interruption action),
1651*4882a593Smuzhiyun * we need to combine cr14, mcic and the external damage code.
1652*4882a593Smuzhiyun * The failing storage address and the logout area should not be
1653*4882a593Smuzhiyun * OR'ed together; we just indicate the last occurrence of the
1654*4882a593Smuzhiyun * corresponding machine check.
1655*4882a593Smuzhiyun */
1656*4882a593Smuzhiyun mchk->cr14 |= irq->u.mchk.cr14;
1657*4882a593Smuzhiyun mchk->mcic |= irq->u.mchk.mcic;
1658*4882a593Smuzhiyun mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
1659*4882a593Smuzhiyun mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
1660*4882a593Smuzhiyun memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
1661*4882a593Smuzhiyun sizeof(mchk->fixed_logout));
1662*4882a593Smuzhiyun if (mchk->mcic & MCHK_EX_MASK)
1663*4882a593Smuzhiyun set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
1664*4882a593Smuzhiyun else if (mchk->mcic & MCHK_REP_MASK)
1665*4882a593Smuzhiyun set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
1666*4882a593Smuzhiyun return 0;
1667*4882a593Smuzhiyun }
1668*4882a593Smuzhiyun
1669*4882a593Smuzhiyun static int __inject_ckc(struct kvm_vcpu *vcpu)
1670*4882a593Smuzhiyun {
1671*4882a593Smuzhiyun struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1672*4882a593Smuzhiyun
1673*4882a593Smuzhiyun vcpu->stat.inject_ckc++;
1674*4882a593Smuzhiyun VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
1675*4882a593Smuzhiyun trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
1676*4882a593Smuzhiyun 0, 0);
1677*4882a593Smuzhiyun
1678*4882a593Smuzhiyun set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
1679*4882a593Smuzhiyun kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1680*4882a593Smuzhiyun return 0;
1681*4882a593Smuzhiyun }
1682*4882a593Smuzhiyun
1683*4882a593Smuzhiyun static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
1684*4882a593Smuzhiyun {
1685*4882a593Smuzhiyun struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1686*4882a593Smuzhiyun
1687*4882a593Smuzhiyun vcpu->stat.inject_cputm++;
1688*4882a593Smuzhiyun VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
1689*4882a593Smuzhiyun trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
1690*4882a593Smuzhiyun 0, 0);
1691*4882a593Smuzhiyun
1692*4882a593Smuzhiyun set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1693*4882a593Smuzhiyun kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1694*4882a593Smuzhiyun return 0;
1695*4882a593Smuzhiyun }
1696*4882a593Smuzhiyun
1697*4882a593Smuzhiyun static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
1698*4882a593Smuzhiyun int isc, u32 schid)
1699*4882a593Smuzhiyun {
1700*4882a593Smuzhiyun struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1701*4882a593Smuzhiyun struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
1702*4882a593Smuzhiyun struct kvm_s390_interrupt_info *iter;
1703*4882a593Smuzhiyun u16 id = (schid & 0xffff0000U) >> 16;
1704*4882a593Smuzhiyun u16 nr = schid & 0x0000ffffU;
1705*4882a593Smuzhiyun
1706*4882a593Smuzhiyun spin_lock(&fi->lock);
1707*4882a593Smuzhiyun list_for_each_entry(iter, isc_list, list) {
1708*4882a593Smuzhiyun if (schid && (id != iter->io.subchannel_id ||
1709*4882a593Smuzhiyun nr != iter->io.subchannel_nr))
1710*4882a593Smuzhiyun continue;
1711*4882a593Smuzhiyun /* found an appropriate entry */
1712*4882a593Smuzhiyun list_del_init(&iter->list);
1713*4882a593Smuzhiyun fi->counters[FIRQ_CNTR_IO] -= 1;
1714*4882a593Smuzhiyun if (list_empty(isc_list))
1715*4882a593Smuzhiyun clear_bit(isc_to_irq_type(isc), &fi->pending_irqs);
1716*4882a593Smuzhiyun spin_unlock(&fi->lock);
1717*4882a593Smuzhiyun return iter;
1718*4882a593Smuzhiyun }
1719*4882a593Smuzhiyun spin_unlock(&fi->lock);
1720*4882a593Smuzhiyun return NULL;
1721*4882a593Smuzhiyun }
1722*4882a593Smuzhiyun
1723*4882a593Smuzhiyun static struct kvm_s390_interrupt_info *get_top_io_int(struct kvm *kvm,
1724*4882a593Smuzhiyun u64 isc_mask, u32 schid)
1725*4882a593Smuzhiyun {
1726*4882a593Smuzhiyun struct kvm_s390_interrupt_info *inti = NULL;
1727*4882a593Smuzhiyun int isc;
1728*4882a593Smuzhiyun
1729*4882a593Smuzhiyun for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
1730*4882a593Smuzhiyun if (isc_mask & isc_to_isc_bits(isc))
1731*4882a593Smuzhiyun inti = get_io_int(kvm, isc, schid);
1732*4882a593Smuzhiyun }
1733*4882a593Smuzhiyun return inti;
1734*4882a593Smuzhiyun }
1735*4882a593Smuzhiyun
1736*4882a593Smuzhiyun static int get_top_gisa_isc(struct kvm *kvm, u64 isc_mask, u32 schid)
1737*4882a593Smuzhiyun {
1738*4882a593Smuzhiyun struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
1739*4882a593Smuzhiyun unsigned long active_mask;
1740*4882a593Smuzhiyun int isc;
1741*4882a593Smuzhiyun
1742*4882a593Smuzhiyun if (schid)
1743*4882a593Smuzhiyun goto out;
1744*4882a593Smuzhiyun if (!gi->origin)
1745*4882a593Smuzhiyun goto out;
1746*4882a593Smuzhiyun
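	/*
	 * Align the 8-bit IPM with the isc bits of isc_mask (bits 31-24) and
	 * shift the result into the top byte, so that __fls() XORed with
	 * BITS_PER_LONG - 1 yields the MSB-0 isc number, i.e. the lowest
	 * (highest priority) pending isc first.
	 */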
1747*4882a593Smuzhiyun active_mask = (isc_mask & gisa_get_ipm(gi->origin) << 24) << 32;
1748*4882a593Smuzhiyun while (active_mask) {
1749*4882a593Smuzhiyun isc = __fls(active_mask) ^ (BITS_PER_LONG - 1);
1750*4882a593Smuzhiyun if (gisa_tac_ipm_gisc(gi->origin, isc))
1751*4882a593Smuzhiyun return isc;
1752*4882a593Smuzhiyun clear_bit_inv(isc, &active_mask);
1753*4882a593Smuzhiyun }
1754*4882a593Smuzhiyun out:
1755*4882a593Smuzhiyun return -EINVAL;
1756*4882a593Smuzhiyun }
1757*4882a593Smuzhiyun
1758*4882a593Smuzhiyun /*
1759*4882a593Smuzhiyun * Dequeue and return an I/O interrupt matching any of the interruption
1760*4882a593Smuzhiyun * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
1761*4882a593Smuzhiyun * Take into account the interrupts pending in the interrupt list and in GISA.
1762*4882a593Smuzhiyun *
1763*4882a593Smuzhiyun * Note that for a guest that does not enable I/O interrupts
1764*4882a593Smuzhiyun * but relies on TPI, a flood of classic interrupts may starve
1765*4882a593Smuzhiyun * out adapter interrupts on the same isc. Linux does not do
1766*4882a593Smuzhiyun * that, and it is possible to work around the issue by configuring
1767*4882a593Smuzhiyun * different iscs for classic and adapter interrupts in the guest,
1768*4882a593Smuzhiyun * but we may want to revisit this in the future.
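 *
 * Illustrative sketch (an assumption, not taken from this file): a
 * TPI-style caller would derive the isc mask from the guest's cr6 and
 * pass schid 0 to match any subchannel:
 *
 *	isc_mask = vcpu->arch.sie_block->gcr[6] & 0xff000000UL;
 *	inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask, 0);
 *
 * The caller owns the returned interrupt info and frees it after
 * delivering (or reinjecting) it.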
1769*4882a593Smuzhiyun */
1770*4882a593Smuzhiyun struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
1771*4882a593Smuzhiyun u64 isc_mask, u32 schid)
1772*4882a593Smuzhiyun {
1773*4882a593Smuzhiyun struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
1774*4882a593Smuzhiyun struct kvm_s390_interrupt_info *inti, *tmp_inti;
1775*4882a593Smuzhiyun int isc;
1776*4882a593Smuzhiyun
1777*4882a593Smuzhiyun inti = get_top_io_int(kvm, isc_mask, schid);
1778*4882a593Smuzhiyun
1779*4882a593Smuzhiyun isc = get_top_gisa_isc(kvm, isc_mask, schid);
1780*4882a593Smuzhiyun if (isc < 0)
1781*4882a593Smuzhiyun /* no AI in GISA */
1782*4882a593Smuzhiyun goto out;
1783*4882a593Smuzhiyun
1784*4882a593Smuzhiyun if (!inti)
1785*4882a593Smuzhiyun /* AI in GISA but no classical IO int */
1786*4882a593Smuzhiyun goto gisa_out;
1787*4882a593Smuzhiyun
1788*4882a593Smuzhiyun /* both types of interrupts present */
1789*4882a593Smuzhiyun if (int_word_to_isc(inti->io.io_int_word) <= isc) {
1790*4882a593Smuzhiyun /* classical IO int with higher priority */
1791*4882a593Smuzhiyun gisa_set_ipm_gisc(gi->origin, isc);
1792*4882a593Smuzhiyun goto out;
1793*4882a593Smuzhiyun }
1794*4882a593Smuzhiyun gisa_out:
1795*4882a593Smuzhiyun tmp_inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1796*4882a593Smuzhiyun if (tmp_inti) {
1797*4882a593Smuzhiyun tmp_inti->type = KVM_S390_INT_IO(1, 0, 0, 0);
1798*4882a593Smuzhiyun tmp_inti->io.io_int_word = isc_to_int_word(isc);
1799*4882a593Smuzhiyun if (inti)
1800*4882a593Smuzhiyun kvm_s390_reinject_io_int(kvm, inti);
1801*4882a593Smuzhiyun inti = tmp_inti;
1802*4882a593Smuzhiyun } else
1803*4882a593Smuzhiyun gisa_set_ipm_gisc(gi->origin, isc);
1804*4882a593Smuzhiyun out:
1805*4882a593Smuzhiyun return inti;
1806*4882a593Smuzhiyun }
1807*4882a593Smuzhiyun
1808*4882a593Smuzhiyun static int __inject_service(struct kvm *kvm,
1809*4882a593Smuzhiyun struct kvm_s390_interrupt_info *inti)
1810*4882a593Smuzhiyun {
1811*4882a593Smuzhiyun struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1812*4882a593Smuzhiyun
1813*4882a593Smuzhiyun kvm->stat.inject_service_signal++;
1814*4882a593Smuzhiyun spin_lock(&fi->lock);
1815*4882a593Smuzhiyun fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
1816*4882a593Smuzhiyun
1817*4882a593Smuzhiyun /* We always allow events, track them separately from the sccb ints */
1818*4882a593Smuzhiyun if (fi->srv_signal.ext_params & SCCB_EVENT_PENDING)
1819*4882a593Smuzhiyun set_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
1820*4882a593Smuzhiyun
1821*4882a593Smuzhiyun /*
1822*4882a593Smuzhiyun * Early versions of the QEMU s390 bios will inject several
1823*4882a593Smuzhiyun * service interrupts one after another without handling a
1824*4882a593Smuzhiyun * condition code indicating busy.
1825*4882a593Smuzhiyun * We will silently ignore those superfluous sccb values.
1826*4882a593Smuzhiyun * A future version of QEMU will take care of serialization
1827*4882a593Smuzhiyun * of servc requests.
1828*4882a593Smuzhiyun */
1829*4882a593Smuzhiyun if (fi->srv_signal.ext_params & SCCB_MASK)
1830*4882a593Smuzhiyun goto out;
1831*4882a593Smuzhiyun fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
1832*4882a593Smuzhiyun set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
1833*4882a593Smuzhiyun out:
1834*4882a593Smuzhiyun spin_unlock(&fi->lock);
1835*4882a593Smuzhiyun kfree(inti);
1836*4882a593Smuzhiyun return 0;
1837*4882a593Smuzhiyun }
1838*4882a593Smuzhiyun
1839*4882a593Smuzhiyun static int __inject_virtio(struct kvm *kvm,
1840*4882a593Smuzhiyun struct kvm_s390_interrupt_info *inti)
1841*4882a593Smuzhiyun {
1842*4882a593Smuzhiyun struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1843*4882a593Smuzhiyun
1844*4882a593Smuzhiyun kvm->stat.inject_virtio++;
1845*4882a593Smuzhiyun spin_lock(&fi->lock);
1846*4882a593Smuzhiyun if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
1847*4882a593Smuzhiyun spin_unlock(&fi->lock);
1848*4882a593Smuzhiyun return -EBUSY;
1849*4882a593Smuzhiyun }
1850*4882a593Smuzhiyun fi->counters[FIRQ_CNTR_VIRTIO] += 1;
1851*4882a593Smuzhiyun list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
1852*4882a593Smuzhiyun set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
1853*4882a593Smuzhiyun spin_unlock(&fi->lock);
1854*4882a593Smuzhiyun return 0;
1855*4882a593Smuzhiyun }
1856*4882a593Smuzhiyun
1857*4882a593Smuzhiyun static int __inject_pfault_done(struct kvm *kvm,
1858*4882a593Smuzhiyun struct kvm_s390_interrupt_info *inti)
1859*4882a593Smuzhiyun {
1860*4882a593Smuzhiyun struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1861*4882a593Smuzhiyun
1862*4882a593Smuzhiyun kvm->stat.inject_pfault_done++;
1863*4882a593Smuzhiyun spin_lock(&fi->lock);
1864*4882a593Smuzhiyun if (fi->counters[FIRQ_CNTR_PFAULT] >=
1865*4882a593Smuzhiyun (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
1866*4882a593Smuzhiyun spin_unlock(&fi->lock);
1867*4882a593Smuzhiyun return -EBUSY;
1868*4882a593Smuzhiyun }
1869*4882a593Smuzhiyun fi->counters[FIRQ_CNTR_PFAULT] += 1;
1870*4882a593Smuzhiyun list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
1871*4882a593Smuzhiyun set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
1872*4882a593Smuzhiyun spin_unlock(&fi->lock);
1873*4882a593Smuzhiyun return 0;
1874*4882a593Smuzhiyun }
1875*4882a593Smuzhiyun
1876*4882a593Smuzhiyun #define CR_PENDING_SUBCLASS 28
1877*4882a593Smuzhiyun static int __inject_float_mchk(struct kvm *kvm,
1878*4882a593Smuzhiyun struct kvm_s390_interrupt_info *inti)
1879*4882a593Smuzhiyun {
1880*4882a593Smuzhiyun struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1881*4882a593Smuzhiyun
1882*4882a593Smuzhiyun kvm->stat.inject_float_mchk++;
1883*4882a593Smuzhiyun spin_lock(&fi->lock);
1884*4882a593Smuzhiyun fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
1885*4882a593Smuzhiyun fi->mchk.mcic |= inti->mchk.mcic;
1886*4882a593Smuzhiyun set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
1887*4882a593Smuzhiyun spin_unlock(&fi->lock);
1888*4882a593Smuzhiyun kfree(inti);
1889*4882a593Smuzhiyun return 0;
1890*4882a593Smuzhiyun }
1891*4882a593Smuzhiyun
1892*4882a593Smuzhiyun static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1893*4882a593Smuzhiyun {
1894*4882a593Smuzhiyun struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
1895*4882a593Smuzhiyun struct kvm_s390_float_interrupt *fi;
1896*4882a593Smuzhiyun struct list_head *list;
1897*4882a593Smuzhiyun int isc;
1898*4882a593Smuzhiyun
1899*4882a593Smuzhiyun kvm->stat.inject_io++;
1900*4882a593Smuzhiyun isc = int_word_to_isc(inti->io.io_int_word);
1901*4882a593Smuzhiyun
1902*4882a593Smuzhiyun /*
1903*4882a593Smuzhiyun * Do not make use of gisa in protected mode. We do not use the lock
1904*4882a593Smuzhiyun * checking variant as this is just a performance optimization and we
1905*4882a593Smuzhiyun * do not hold the lock here. This is ok as the code will pick
1906*4882a593Smuzhiyun * interrupts from both "lists" for delivery.
1907*4882a593Smuzhiyun */
1908*4882a593Smuzhiyun if (!kvm_s390_pv_get_handle(kvm) &&
1909*4882a593Smuzhiyun gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) {
1910*4882a593Smuzhiyun VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
1911*4882a593Smuzhiyun gisa_set_ipm_gisc(gi->origin, isc);
1912*4882a593Smuzhiyun kfree(inti);
1913*4882a593Smuzhiyun return 0;
1914*4882a593Smuzhiyun }
1915*4882a593Smuzhiyun
1916*4882a593Smuzhiyun fi = &kvm->arch.float_int;
1917*4882a593Smuzhiyun spin_lock(&fi->lock);
1918*4882a593Smuzhiyun if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
1919*4882a593Smuzhiyun spin_unlock(&fi->lock);
1920*4882a593Smuzhiyun return -EBUSY;
1921*4882a593Smuzhiyun }
1922*4882a593Smuzhiyun fi->counters[FIRQ_CNTR_IO] += 1;
1923*4882a593Smuzhiyun
1924*4882a593Smuzhiyun if (inti->type & KVM_S390_INT_IO_AI_MASK)
1925*4882a593Smuzhiyun VM_EVENT(kvm, 4, "%s", "inject: I/O (AI)");
1926*4882a593Smuzhiyun else
1927*4882a593Smuzhiyun VM_EVENT(kvm, 4, "inject: I/O %x ss %x schid %04x",
1928*4882a593Smuzhiyun inti->io.subchannel_id >> 8,
1929*4882a593Smuzhiyun inti->io.subchannel_id >> 1 & 0x3,
1930*4882a593Smuzhiyun inti->io.subchannel_nr);
1931*4882a593Smuzhiyun list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
1932*4882a593Smuzhiyun list_add_tail(&inti->list, list);
1933*4882a593Smuzhiyun set_bit(isc_to_irq_type(isc), &fi->pending_irqs);
1934*4882a593Smuzhiyun spin_unlock(&fi->lock);
1935*4882a593Smuzhiyun return 0;
1936*4882a593Smuzhiyun }
1937*4882a593Smuzhiyun
1938*4882a593Smuzhiyun /*
1939*4882a593Smuzhiyun * Find a destination VCPU for a floating irq and kick it.
1940*4882a593Smuzhiyun */
1941*4882a593Smuzhiyun static void __floating_irq_kick(struct kvm *kvm, u64 type)
1942*4882a593Smuzhiyun {
1943*4882a593Smuzhiyun struct kvm_vcpu *dst_vcpu;
1944*4882a593Smuzhiyun int sigcpu, online_vcpus, nr_tries = 0;
1945*4882a593Smuzhiyun
1946*4882a593Smuzhiyun online_vcpus = atomic_read(&kvm->online_vcpus);
1947*4882a593Smuzhiyun if (!online_vcpus)
1948*4882a593Smuzhiyun return;
1949*4882a593Smuzhiyun
1950*4882a593Smuzhiyun /* find idle VCPUs first, then round robin */
1951*4882a593Smuzhiyun sigcpu = find_first_bit(kvm->arch.idle_mask, online_vcpus);
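	/* find_first_bit() returns online_vcpus if no vcpu is idle */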
1952*4882a593Smuzhiyun if (sigcpu == online_vcpus) {
1953*4882a593Smuzhiyun do {
1954*4882a593Smuzhiyun sigcpu = kvm->arch.float_int.next_rr_cpu++;
1955*4882a593Smuzhiyun kvm->arch.float_int.next_rr_cpu %= online_vcpus;
1956*4882a593Smuzhiyun /* avoid endless loops if all vcpus are stopped */
1957*4882a593Smuzhiyun if (nr_tries++ >= online_vcpus)
1958*4882a593Smuzhiyun return;
1959*4882a593Smuzhiyun } while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
1960*4882a593Smuzhiyun }
1961*4882a593Smuzhiyun dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
1962*4882a593Smuzhiyun
1963*4882a593Smuzhiyun /* make the VCPU drop out of the SIE, or wake it up if sleeping */
1964*4882a593Smuzhiyun switch (type) {
1965*4882a593Smuzhiyun case KVM_S390_MCHK:
1966*4882a593Smuzhiyun kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT);
1967*4882a593Smuzhiyun break;
1968*4882a593Smuzhiyun case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1969*4882a593Smuzhiyun if (!(type & KVM_S390_INT_IO_AI_MASK &&
1970*4882a593Smuzhiyun kvm->arch.gisa_int.origin) ||
1971*4882a593Smuzhiyun kvm_s390_pv_cpu_get_handle(dst_vcpu))
1972*4882a593Smuzhiyun kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
1973*4882a593Smuzhiyun break;
1974*4882a593Smuzhiyun default:
1975*4882a593Smuzhiyun kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT);
1976*4882a593Smuzhiyun break;
1977*4882a593Smuzhiyun }
1978*4882a593Smuzhiyun kvm_s390_vcpu_wakeup(dst_vcpu);
1979*4882a593Smuzhiyun }
1980*4882a593Smuzhiyun
1981*4882a593Smuzhiyun static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1982*4882a593Smuzhiyun {
1983*4882a593Smuzhiyun u64 type = READ_ONCE(inti->type);
1984*4882a593Smuzhiyun int rc;
1985*4882a593Smuzhiyun
1986*4882a593Smuzhiyun switch (type) {
1987*4882a593Smuzhiyun case KVM_S390_MCHK:
1988*4882a593Smuzhiyun rc = __inject_float_mchk(kvm, inti);
1989*4882a593Smuzhiyun break;
1990*4882a593Smuzhiyun case KVM_S390_INT_VIRTIO:
1991*4882a593Smuzhiyun rc = __inject_virtio(kvm, inti);
1992*4882a593Smuzhiyun break;
1993*4882a593Smuzhiyun case KVM_S390_INT_SERVICE:
1994*4882a593Smuzhiyun rc = __inject_service(kvm, inti);
1995*4882a593Smuzhiyun break;
1996*4882a593Smuzhiyun case KVM_S390_INT_PFAULT_DONE:
1997*4882a593Smuzhiyun rc = __inject_pfault_done(kvm, inti);
1998*4882a593Smuzhiyun break;
1999*4882a593Smuzhiyun case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2000*4882a593Smuzhiyun rc = __inject_io(kvm, inti);
2001*4882a593Smuzhiyun break;
2002*4882a593Smuzhiyun default:
2003*4882a593Smuzhiyun rc = -EINVAL;
2004*4882a593Smuzhiyun }
2005*4882a593Smuzhiyun if (rc)
2006*4882a593Smuzhiyun return rc;
2007*4882a593Smuzhiyun
2008*4882a593Smuzhiyun __floating_irq_kick(kvm, type);
2009*4882a593Smuzhiyun return 0;
2010*4882a593Smuzhiyun }
2011*4882a593Smuzhiyun
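/*
 * Illustrative sketch (an assumption, not part of the original source, with
 * placeholder variables): the parm/parm64 encoding decoded below for a
 * floating I/O interrupt would be built as
 *
 *	struct kvm_s390_interrupt s390int = {
 *		.type   = KVM_S390_INT_IO(1, 0, 0, 0),
 *		.parm   = ((u32)subchannel_id << 16) | subchannel_nr,
 *		.parm64 = ((u64)io_int_parm << 32) | io_int_word,
 *	};
 *	kvm_s390_inject_vm(kvm, &s390int);
 */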
2012*4882a593Smuzhiyun int kvm_s390_inject_vm(struct kvm *kvm,
2013*4882a593Smuzhiyun struct kvm_s390_interrupt *s390int)
2014*4882a593Smuzhiyun {
2015*4882a593Smuzhiyun struct kvm_s390_interrupt_info *inti;
2016*4882a593Smuzhiyun int rc;
2017*4882a593Smuzhiyun
2018*4882a593Smuzhiyun inti = kzalloc(sizeof(*inti), GFP_KERNEL);
2019*4882a593Smuzhiyun if (!inti)
2020*4882a593Smuzhiyun return -ENOMEM;
2021*4882a593Smuzhiyun
2022*4882a593Smuzhiyun inti->type = s390int->type;
2023*4882a593Smuzhiyun switch (inti->type) {
2024*4882a593Smuzhiyun case KVM_S390_INT_VIRTIO:
2025*4882a593Smuzhiyun VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
2026*4882a593Smuzhiyun s390int->parm, s390int->parm64);
2027*4882a593Smuzhiyun inti->ext.ext_params = s390int->parm;
2028*4882a593Smuzhiyun inti->ext.ext_params2 = s390int->parm64;
2029*4882a593Smuzhiyun break;
2030*4882a593Smuzhiyun case KVM_S390_INT_SERVICE:
2031*4882a593Smuzhiyun VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
2032*4882a593Smuzhiyun inti->ext.ext_params = s390int->parm;
2033*4882a593Smuzhiyun break;
2034*4882a593Smuzhiyun case KVM_S390_INT_PFAULT_DONE:
2035*4882a593Smuzhiyun inti->ext.ext_params2 = s390int->parm64;
2036*4882a593Smuzhiyun break;
2037*4882a593Smuzhiyun case KVM_S390_MCHK:
2038*4882a593Smuzhiyun VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
2039*4882a593Smuzhiyun s390int->parm64);
2040*4882a593Smuzhiyun inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
2041*4882a593Smuzhiyun inti->mchk.mcic = s390int->parm64;
2042*4882a593Smuzhiyun break;
2043*4882a593Smuzhiyun case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2044*4882a593Smuzhiyun inti->io.subchannel_id = s390int->parm >> 16;
2045*4882a593Smuzhiyun inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
2046*4882a593Smuzhiyun inti->io.io_int_parm = s390int->parm64 >> 32;
2047*4882a593Smuzhiyun inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
2048*4882a593Smuzhiyun break;
2049*4882a593Smuzhiyun default:
2050*4882a593Smuzhiyun kfree(inti);
2051*4882a593Smuzhiyun return -EINVAL;
2052*4882a593Smuzhiyun }
2053*4882a593Smuzhiyun trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
2054*4882a593Smuzhiyun 2);
2055*4882a593Smuzhiyun
2056*4882a593Smuzhiyun rc = __inject_vm(kvm, inti);
2057*4882a593Smuzhiyun if (rc)
2058*4882a593Smuzhiyun kfree(inti);
2059*4882a593Smuzhiyun return rc;
2060*4882a593Smuzhiyun }
2061*4882a593Smuzhiyun
2062*4882a593Smuzhiyun int kvm_s390_reinject_io_int(struct kvm *kvm,
2063*4882a593Smuzhiyun struct kvm_s390_interrupt_info *inti)
2064*4882a593Smuzhiyun {
2065*4882a593Smuzhiyun return __inject_vm(kvm, inti);
2066*4882a593Smuzhiyun }
2067*4882a593Smuzhiyun
2068*4882a593Smuzhiyun int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
2069*4882a593Smuzhiyun struct kvm_s390_irq *irq)
2070*4882a593Smuzhiyun {
2071*4882a593Smuzhiyun irq->type = s390int->type;
2072*4882a593Smuzhiyun switch (irq->type) {
2073*4882a593Smuzhiyun case KVM_S390_PROGRAM_INT:
2074*4882a593Smuzhiyun if (s390int->parm & 0xffff0000)
2075*4882a593Smuzhiyun return -EINVAL;
2076*4882a593Smuzhiyun irq->u.pgm.code = s390int->parm;
2077*4882a593Smuzhiyun break;
2078*4882a593Smuzhiyun case KVM_S390_SIGP_SET_PREFIX:
2079*4882a593Smuzhiyun irq->u.prefix.address = s390int->parm;
2080*4882a593Smuzhiyun break;
2081*4882a593Smuzhiyun case KVM_S390_SIGP_STOP:
2082*4882a593Smuzhiyun irq->u.stop.flags = s390int->parm;
2083*4882a593Smuzhiyun break;
2084*4882a593Smuzhiyun case KVM_S390_INT_EXTERNAL_CALL:
2085*4882a593Smuzhiyun if (s390int->parm & 0xffff0000)
2086*4882a593Smuzhiyun return -EINVAL;
2087*4882a593Smuzhiyun irq->u.extcall.code = s390int->parm;
2088*4882a593Smuzhiyun break;
2089*4882a593Smuzhiyun case KVM_S390_INT_EMERGENCY:
2090*4882a593Smuzhiyun if (s390int->parm & 0xffff0000)
2091*4882a593Smuzhiyun return -EINVAL;
2092*4882a593Smuzhiyun irq->u.emerg.code = s390int->parm;
2093*4882a593Smuzhiyun break;
2094*4882a593Smuzhiyun case KVM_S390_MCHK:
2095*4882a593Smuzhiyun irq->u.mchk.mcic = s390int->parm64;
2096*4882a593Smuzhiyun break;
2097*4882a593Smuzhiyun case KVM_S390_INT_PFAULT_INIT:
2098*4882a593Smuzhiyun irq->u.ext.ext_params = s390int->parm;
2099*4882a593Smuzhiyun irq->u.ext.ext_params2 = s390int->parm64;
2100*4882a593Smuzhiyun break;
2101*4882a593Smuzhiyun case KVM_S390_RESTART:
2102*4882a593Smuzhiyun case KVM_S390_INT_CLOCK_COMP:
2103*4882a593Smuzhiyun case KVM_S390_INT_CPU_TIMER:
2104*4882a593Smuzhiyun break;
2105*4882a593Smuzhiyun default:
2106*4882a593Smuzhiyun return -EINVAL;
2107*4882a593Smuzhiyun }
2108*4882a593Smuzhiyun return 0;
2109*4882a593Smuzhiyun }
2110*4882a593Smuzhiyun
2111*4882a593Smuzhiyun int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
2112*4882a593Smuzhiyun {
2113*4882a593Smuzhiyun struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2114*4882a593Smuzhiyun
2115*4882a593Smuzhiyun return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
2116*4882a593Smuzhiyun }
2117*4882a593Smuzhiyun
2118*4882a593Smuzhiyun int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu)
2119*4882a593Smuzhiyun {
2120*4882a593Smuzhiyun struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2121*4882a593Smuzhiyun
2122*4882a593Smuzhiyun return test_bit(IRQ_PEND_RESTART, &li->pending_irqs);
2123*4882a593Smuzhiyun }
2124*4882a593Smuzhiyun
2125*4882a593Smuzhiyun void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
2126*4882a593Smuzhiyun {
2127*4882a593Smuzhiyun struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2128*4882a593Smuzhiyun
2129*4882a593Smuzhiyun spin_lock(&li->lock);
2130*4882a593Smuzhiyun li->irq.stop.flags = 0;
2131*4882a593Smuzhiyun clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
2132*4882a593Smuzhiyun spin_unlock(&li->lock);
2133*4882a593Smuzhiyun }
2134*4882a593Smuzhiyun
2135*4882a593Smuzhiyun static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
2136*4882a593Smuzhiyun {
2137*4882a593Smuzhiyun int rc;
2138*4882a593Smuzhiyun
2139*4882a593Smuzhiyun switch (irq->type) {
2140*4882a593Smuzhiyun case KVM_S390_PROGRAM_INT:
2141*4882a593Smuzhiyun rc = __inject_prog(vcpu, irq);
2142*4882a593Smuzhiyun break;
2143*4882a593Smuzhiyun case KVM_S390_SIGP_SET_PREFIX:
2144*4882a593Smuzhiyun rc = __inject_set_prefix(vcpu, irq);
2145*4882a593Smuzhiyun break;
2146*4882a593Smuzhiyun case KVM_S390_SIGP_STOP:
2147*4882a593Smuzhiyun rc = __inject_sigp_stop(vcpu, irq);
2148*4882a593Smuzhiyun break;
2149*4882a593Smuzhiyun case KVM_S390_RESTART:
2150*4882a593Smuzhiyun rc = __inject_sigp_restart(vcpu);
2151*4882a593Smuzhiyun break;
2152*4882a593Smuzhiyun case KVM_S390_INT_CLOCK_COMP:
2153*4882a593Smuzhiyun rc = __inject_ckc(vcpu);
2154*4882a593Smuzhiyun break;
2155*4882a593Smuzhiyun case KVM_S390_INT_CPU_TIMER:
2156*4882a593Smuzhiyun rc = __inject_cpu_timer(vcpu);
2157*4882a593Smuzhiyun break;
2158*4882a593Smuzhiyun case KVM_S390_INT_EXTERNAL_CALL:
2159*4882a593Smuzhiyun rc = __inject_extcall(vcpu, irq);
2160*4882a593Smuzhiyun break;
2161*4882a593Smuzhiyun case KVM_S390_INT_EMERGENCY:
2162*4882a593Smuzhiyun rc = __inject_sigp_emergency(vcpu, irq);
2163*4882a593Smuzhiyun break;
2164*4882a593Smuzhiyun case KVM_S390_MCHK:
2165*4882a593Smuzhiyun rc = __inject_mchk(vcpu, irq);
2166*4882a593Smuzhiyun break;
2167*4882a593Smuzhiyun case KVM_S390_INT_PFAULT_INIT:
2168*4882a593Smuzhiyun rc = __inject_pfault_init(vcpu, irq);
2169*4882a593Smuzhiyun break;
2170*4882a593Smuzhiyun case KVM_S390_INT_VIRTIO:
2171*4882a593Smuzhiyun case KVM_S390_INT_SERVICE:
2172*4882a593Smuzhiyun case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2173*4882a593Smuzhiyun default:
2174*4882a593Smuzhiyun rc = -EINVAL;
2175*4882a593Smuzhiyun }
2176*4882a593Smuzhiyun
2177*4882a593Smuzhiyun return rc;
2178*4882a593Smuzhiyun }
2179*4882a593Smuzhiyun
2180*4882a593Smuzhiyun int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
2181*4882a593Smuzhiyun {
2182*4882a593Smuzhiyun struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2183*4882a593Smuzhiyun int rc;
2184*4882a593Smuzhiyun
2185*4882a593Smuzhiyun spin_lock(&li->lock);
2186*4882a593Smuzhiyun rc = do_inject_vcpu(vcpu, irq);
2187*4882a593Smuzhiyun spin_unlock(&li->lock);
2188*4882a593Smuzhiyun if (!rc)
2189*4882a593Smuzhiyun kvm_s390_vcpu_wakeup(vcpu);
2190*4882a593Smuzhiyun return rc;
2191*4882a593Smuzhiyun }
2192*4882a593Smuzhiyun
2193*4882a593Smuzhiyun static inline void clear_irq_list(struct list_head *_list)
2194*4882a593Smuzhiyun {
2195*4882a593Smuzhiyun struct kvm_s390_interrupt_info *inti, *n;
2196*4882a593Smuzhiyun
2197*4882a593Smuzhiyun list_for_each_entry_safe(inti, n, _list, list) {
2198*4882a593Smuzhiyun list_del(&inti->list);
2199*4882a593Smuzhiyun kfree(inti);
2200*4882a593Smuzhiyun }
2201*4882a593Smuzhiyun }
2202*4882a593Smuzhiyun
2203*4882a593Smuzhiyun static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
2204*4882a593Smuzhiyun struct kvm_s390_irq *irq)
2205*4882a593Smuzhiyun {
2206*4882a593Smuzhiyun irq->type = inti->type;
2207*4882a593Smuzhiyun switch (inti->type) {
2208*4882a593Smuzhiyun case KVM_S390_INT_PFAULT_INIT:
2209*4882a593Smuzhiyun case KVM_S390_INT_PFAULT_DONE:
2210*4882a593Smuzhiyun case KVM_S390_INT_VIRTIO:
2211*4882a593Smuzhiyun irq->u.ext = inti->ext;
2212*4882a593Smuzhiyun break;
2213*4882a593Smuzhiyun case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2214*4882a593Smuzhiyun irq->u.io = inti->io;
2215*4882a593Smuzhiyun break;
2216*4882a593Smuzhiyun }
2217*4882a593Smuzhiyun }
2218*4882a593Smuzhiyun
2219*4882a593Smuzhiyun void kvm_s390_clear_float_irqs(struct kvm *kvm)
2220*4882a593Smuzhiyun {
2221*4882a593Smuzhiyun struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2222*4882a593Smuzhiyun int i;
2223*4882a593Smuzhiyun
2224*4882a593Smuzhiyun mutex_lock(&kvm->lock);
2225*4882a593Smuzhiyun if (!kvm_s390_pv_is_protected(kvm))
2226*4882a593Smuzhiyun fi->masked_irqs = 0;
2227*4882a593Smuzhiyun mutex_unlock(&kvm->lock);
2228*4882a593Smuzhiyun spin_lock(&fi->lock);
2229*4882a593Smuzhiyun fi->pending_irqs = 0;
2230*4882a593Smuzhiyun memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
2231*4882a593Smuzhiyun memset(&fi->mchk, 0, sizeof(fi->mchk));
2232*4882a593Smuzhiyun for (i = 0; i < FIRQ_LIST_COUNT; i++)
2233*4882a593Smuzhiyun clear_irq_list(&fi->lists[i]);
2234*4882a593Smuzhiyun for (i = 0; i < FIRQ_MAX_COUNT; i++)
2235*4882a593Smuzhiyun fi->counters[i] = 0;
2236*4882a593Smuzhiyun spin_unlock(&fi->lock);
2237*4882a593Smuzhiyun kvm_s390_gisa_clear(kvm);
2238*4882a593Smuzhiyun }
2239*4882a593Smuzhiyun
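/*
 * Copy all currently pending floating interrupts (I/O interrupts held
 * in the GISA, the queued interrupt lists, a pending service signal and
 * a pending machine check) into a userspace buffer of @len bytes.
 * Returns the number of interrupts written, -EINVAL for a bad length,
 * -ENOBUFS if the temporary buffer cannot be allocated, -ENOMEM if the
 * userspace buffer is too small (the caller may retry with a bigger
 * one) or -EFAULT when the copy-out fails.
 */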
2240*4882a593Smuzhiyun static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
2241*4882a593Smuzhiyun {
2242*4882a593Smuzhiyun struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
2243*4882a593Smuzhiyun struct kvm_s390_interrupt_info *inti;
2244*4882a593Smuzhiyun struct kvm_s390_float_interrupt *fi;
2245*4882a593Smuzhiyun struct kvm_s390_irq *buf;
2246*4882a593Smuzhiyun struct kvm_s390_irq *irq;
2247*4882a593Smuzhiyun int max_irqs;
2248*4882a593Smuzhiyun int ret = 0;
2249*4882a593Smuzhiyun int n = 0;
2250*4882a593Smuzhiyun int i;
2251*4882a593Smuzhiyun
2252*4882a593Smuzhiyun if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
2253*4882a593Smuzhiyun return -EINVAL;
2254*4882a593Smuzhiyun
2255*4882a593Smuzhiyun /*
2256*4882a593Smuzhiyun * We are already using -ENOMEM to signal
2257*4882a593Smuzhiyun * userspace it may retry with a bigger buffer,
2258*4882a593Smuzhiyun * so we need to use something else for this case
2259*4882a593Smuzhiyun */
2260*4882a593Smuzhiyun buf = vzalloc(len);
2261*4882a593Smuzhiyun if (!buf)
2262*4882a593Smuzhiyun return -ENOBUFS;
2263*4882a593Smuzhiyun
2264*4882a593Smuzhiyun max_irqs = len / sizeof(struct kvm_s390_irq);
2265*4882a593Smuzhiyun
2266*4882a593Smuzhiyun if (gi->origin && gisa_get_ipm(gi->origin)) {
2267*4882a593Smuzhiyun for (i = 0; i <= MAX_ISC; i++) {
2268*4882a593Smuzhiyun if (n == max_irqs) {
2269*4882a593Smuzhiyun /* signal userspace to try again */
2270*4882a593Smuzhiyun ret = -ENOMEM;
2271*4882a593Smuzhiyun goto out_nolock;
2272*4882a593Smuzhiyun }
2273*4882a593Smuzhiyun if (gisa_tac_ipm_gisc(gi->origin, i)) {
2274*4882a593Smuzhiyun irq = (struct kvm_s390_irq *) &buf[n];
2275*4882a593Smuzhiyun irq->type = KVM_S390_INT_IO(1, 0, 0, 0);
2276*4882a593Smuzhiyun irq->u.io.io_int_word = isc_to_int_word(i);
2277*4882a593Smuzhiyun n++;
2278*4882a593Smuzhiyun }
2279*4882a593Smuzhiyun }
2280*4882a593Smuzhiyun }
2281*4882a593Smuzhiyun fi = &kvm->arch.float_int;
2282*4882a593Smuzhiyun spin_lock(&fi->lock);
2283*4882a593Smuzhiyun for (i = 0; i < FIRQ_LIST_COUNT; i++) {
2284*4882a593Smuzhiyun list_for_each_entry(inti, &fi->lists[i], list) {
2285*4882a593Smuzhiyun if (n == max_irqs) {
2286*4882a593Smuzhiyun /* signal userspace to try again */
2287*4882a593Smuzhiyun ret = -ENOMEM;
2288*4882a593Smuzhiyun goto out;
2289*4882a593Smuzhiyun }
2290*4882a593Smuzhiyun inti_to_irq(inti, &buf[n]);
2291*4882a593Smuzhiyun n++;
2292*4882a593Smuzhiyun }
2293*4882a593Smuzhiyun }
2294*4882a593Smuzhiyun if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs) ||
2295*4882a593Smuzhiyun test_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs)) {
2296*4882a593Smuzhiyun if (n == max_irqs) {
2297*4882a593Smuzhiyun /* signal userspace to try again */
2298*4882a593Smuzhiyun ret = -ENOMEM;
2299*4882a593Smuzhiyun goto out;
2300*4882a593Smuzhiyun }
2301*4882a593Smuzhiyun irq = (struct kvm_s390_irq *) &buf[n];
2302*4882a593Smuzhiyun irq->type = KVM_S390_INT_SERVICE;
2303*4882a593Smuzhiyun irq->u.ext = fi->srv_signal;
2304*4882a593Smuzhiyun n++;
2305*4882a593Smuzhiyun }
2306*4882a593Smuzhiyun if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
2307*4882a593Smuzhiyun if (n == max_irqs) {
2308*4882a593Smuzhiyun /* signal userspace to try again */
2309*4882a593Smuzhiyun ret = -ENOMEM;
2310*4882a593Smuzhiyun goto out;
2311*4882a593Smuzhiyun }
2312*4882a593Smuzhiyun irq = (struct kvm_s390_irq *) &buf[n];
2313*4882a593Smuzhiyun irq->type = KVM_S390_MCHK;
2314*4882a593Smuzhiyun irq->u.mchk = fi->mchk;
2315*4882a593Smuzhiyun n++;
2316*4882a593Smuzhiyun }
2317*4882a593Smuzhiyun
2318*4882a593Smuzhiyun out:
2319*4882a593Smuzhiyun spin_unlock(&fi->lock);
2320*4882a593Smuzhiyun out_nolock:
2321*4882a593Smuzhiyun if (!ret && n > 0) {
2322*4882a593Smuzhiyun if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
2323*4882a593Smuzhiyun ret = -EFAULT;
2324*4882a593Smuzhiyun }
2325*4882a593Smuzhiyun vfree(buf);
2326*4882a593Smuzhiyun
2327*4882a593Smuzhiyun return ret < 0 ? ret : n;
2328*4882a593Smuzhiyun }
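/*
 * Userspace view (hedged sketch, not taken from this file): the buffer
 * length travels in kvm_device_attr.attr and the pointer in .addr, so a
 * VMM can simply retry with a larger buffer while -ENOMEM is returned:
 *
 *	struct kvm_device_attr a = {
 *		.group = KVM_DEV_FLIC_GET_ALL_IRQS,
 *		.attr  = bufsize,
 *		.addr  = (__u64)buf,
 *	};
 *
 *	r = ioctl(flic_fd, KVM_GET_DEVICE_ATTR, &a);
 */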
2329*4882a593Smuzhiyun
2330*4882a593Smuzhiyun static int flic_ais_mode_get_all(struct kvm *kvm, struct kvm_device_attr *attr)
2331*4882a593Smuzhiyun {
2332*4882a593Smuzhiyun struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2333*4882a593Smuzhiyun struct kvm_s390_ais_all ais;
2334*4882a593Smuzhiyun
2335*4882a593Smuzhiyun if (attr->attr < sizeof(ais))
2336*4882a593Smuzhiyun return -EINVAL;
2337*4882a593Smuzhiyun
2338*4882a593Smuzhiyun if (!test_kvm_facility(kvm, 72))
2339*4882a593Smuzhiyun return -EOPNOTSUPP;
2340*4882a593Smuzhiyun
2341*4882a593Smuzhiyun mutex_lock(&fi->ais_lock);
2342*4882a593Smuzhiyun ais.simm = fi->simm;
2343*4882a593Smuzhiyun ais.nimm = fi->nimm;
2344*4882a593Smuzhiyun mutex_unlock(&fi->ais_lock);
2345*4882a593Smuzhiyun
2346*4882a593Smuzhiyun if (copy_to_user((void __user *)attr->addr, &ais, sizeof(ais)))
2347*4882a593Smuzhiyun return -EFAULT;
2348*4882a593Smuzhiyun
2349*4882a593Smuzhiyun return 0;
2350*4882a593Smuzhiyun }
2351*4882a593Smuzhiyun
2352*4882a593Smuzhiyun static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2353*4882a593Smuzhiyun {
2354*4882a593Smuzhiyun int r;
2355*4882a593Smuzhiyun
2356*4882a593Smuzhiyun switch (attr->group) {
2357*4882a593Smuzhiyun case KVM_DEV_FLIC_GET_ALL_IRQS:
2358*4882a593Smuzhiyun r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
2359*4882a593Smuzhiyun attr->attr);
2360*4882a593Smuzhiyun break;
2361*4882a593Smuzhiyun case KVM_DEV_FLIC_AISM_ALL:
2362*4882a593Smuzhiyun r = flic_ais_mode_get_all(dev->kvm, attr);
2363*4882a593Smuzhiyun break;
2364*4882a593Smuzhiyun default:
2365*4882a593Smuzhiyun r = -EINVAL;
2366*4882a593Smuzhiyun }
2367*4882a593Smuzhiyun
2368*4882a593Smuzhiyun return r;
2369*4882a593Smuzhiyun }
2370*4882a593Smuzhiyun
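/*
 * Read one struct kvm_s390_irq from userspace into an in-kernel
 * interrupt_info: only the type and the union member belonging to that
 * type are copied, the rest of the zero-allocated structure is left
 * untouched.
 */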
2371*4882a593Smuzhiyun static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
2372*4882a593Smuzhiyun u64 addr)
2373*4882a593Smuzhiyun {
2374*4882a593Smuzhiyun struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
2375*4882a593Smuzhiyun void *target = NULL;
2376*4882a593Smuzhiyun void __user *source;
2377*4882a593Smuzhiyun u64 size;
2378*4882a593Smuzhiyun
2379*4882a593Smuzhiyun if (get_user(inti->type, (u64 __user *)addr))
2380*4882a593Smuzhiyun return -EFAULT;
2381*4882a593Smuzhiyun
2382*4882a593Smuzhiyun switch (inti->type) {
2383*4882a593Smuzhiyun case KVM_S390_INT_PFAULT_INIT:
2384*4882a593Smuzhiyun case KVM_S390_INT_PFAULT_DONE:
2385*4882a593Smuzhiyun case KVM_S390_INT_VIRTIO:
2386*4882a593Smuzhiyun case KVM_S390_INT_SERVICE:
2387*4882a593Smuzhiyun target = (void *) &inti->ext;
2388*4882a593Smuzhiyun source = &uptr->u.ext;
2389*4882a593Smuzhiyun size = sizeof(inti->ext);
2390*4882a593Smuzhiyun break;
2391*4882a593Smuzhiyun case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2392*4882a593Smuzhiyun target = (void *) &inti->io;
2393*4882a593Smuzhiyun source = &uptr->u.io;
2394*4882a593Smuzhiyun size = sizeof(inti->io);
2395*4882a593Smuzhiyun break;
2396*4882a593Smuzhiyun case KVM_S390_MCHK:
2397*4882a593Smuzhiyun target = (void *) &inti->mchk;
2398*4882a593Smuzhiyun source = &uptr->u.mchk;
2399*4882a593Smuzhiyun size = sizeof(inti->mchk);
2400*4882a593Smuzhiyun break;
2401*4882a593Smuzhiyun default:
2402*4882a593Smuzhiyun return -EINVAL;
2403*4882a593Smuzhiyun }
2404*4882a593Smuzhiyun
2405*4882a593Smuzhiyun if (copy_from_user(target, source, size))
2406*4882a593Smuzhiyun return -EFAULT;
2407*4882a593Smuzhiyun
2408*4882a593Smuzhiyun return 0;
2409*4882a593Smuzhiyun }
2410*4882a593Smuzhiyun
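/*
 * KVM_DEV_FLIC_ENQUEUE: attr->attr holds the length in bytes of an
 * array of struct kvm_s390_irq at attr->addr.  Each entry is copied in
 * and injected as a floating interrupt; processing stops at the first
 * error.
 */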
2411*4882a593Smuzhiyun static int enqueue_floating_irq(struct kvm_device *dev,
2412*4882a593Smuzhiyun struct kvm_device_attr *attr)
2413*4882a593Smuzhiyun {
2414*4882a593Smuzhiyun struct kvm_s390_interrupt_info *inti = NULL;
2415*4882a593Smuzhiyun int r = 0;
2416*4882a593Smuzhiyun int len = attr->attr;
2417*4882a593Smuzhiyun
2418*4882a593Smuzhiyun if (len % sizeof(struct kvm_s390_irq) != 0)
2419*4882a593Smuzhiyun return -EINVAL;
2420*4882a593Smuzhiyun else if (len > KVM_S390_FLIC_MAX_BUFFER)
2421*4882a593Smuzhiyun return -EINVAL;
2422*4882a593Smuzhiyun
2423*4882a593Smuzhiyun while (len >= sizeof(struct kvm_s390_irq)) {
2424*4882a593Smuzhiyun inti = kzalloc(sizeof(*inti), GFP_KERNEL);
2425*4882a593Smuzhiyun if (!inti)
2426*4882a593Smuzhiyun return -ENOMEM;
2427*4882a593Smuzhiyun
2428*4882a593Smuzhiyun r = copy_irq_from_user(inti, attr->addr);
2429*4882a593Smuzhiyun if (r) {
2430*4882a593Smuzhiyun kfree(inti);
2431*4882a593Smuzhiyun return r;
2432*4882a593Smuzhiyun }
2433*4882a593Smuzhiyun r = __inject_vm(dev->kvm, inti);
2434*4882a593Smuzhiyun if (r) {
2435*4882a593Smuzhiyun kfree(inti);
2436*4882a593Smuzhiyun return r;
2437*4882a593Smuzhiyun }
2438*4882a593Smuzhiyun len -= sizeof(struct kvm_s390_irq);
2439*4882a593Smuzhiyun attr->addr += sizeof(struct kvm_s390_irq);
2440*4882a593Smuzhiyun }
2441*4882a593Smuzhiyun
2442*4882a593Smuzhiyun return r;
2443*4882a593Smuzhiyun }
2444*4882a593Smuzhiyun
2445*4882a593Smuzhiyun static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
2446*4882a593Smuzhiyun {
2447*4882a593Smuzhiyun if (id >= MAX_S390_IO_ADAPTERS)
2448*4882a593Smuzhiyun return NULL;
2449*4882a593Smuzhiyun id = array_index_nospec(id, MAX_S390_IO_ADAPTERS);
2450*4882a593Smuzhiyun return kvm->arch.adapters[id];
2451*4882a593Smuzhiyun }
2452*4882a593Smuzhiyun
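/*
 * KVM_DEV_FLIC_ADAPTER_REGISTER: allocate the in-kernel representation
 * of an I/O adapter described by the struct kvm_s390_io_adapter copied
 * from userspace.  Registering an already used adapter id fails with
 * -EINVAL.
 */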
2453*4882a593Smuzhiyun static int register_io_adapter(struct kvm_device *dev,
2454*4882a593Smuzhiyun struct kvm_device_attr *attr)
2455*4882a593Smuzhiyun {
2456*4882a593Smuzhiyun struct s390_io_adapter *adapter;
2457*4882a593Smuzhiyun struct kvm_s390_io_adapter adapter_info;
2458*4882a593Smuzhiyun
2459*4882a593Smuzhiyun if (copy_from_user(&adapter_info,
2460*4882a593Smuzhiyun (void __user *)attr->addr, sizeof(adapter_info)))
2461*4882a593Smuzhiyun return -EFAULT;
2462*4882a593Smuzhiyun
2463*4882a593Smuzhiyun if (adapter_info.id >= MAX_S390_IO_ADAPTERS)
2464*4882a593Smuzhiyun return -EINVAL;
2465*4882a593Smuzhiyun
2466*4882a593Smuzhiyun adapter_info.id = array_index_nospec(adapter_info.id,
2467*4882a593Smuzhiyun MAX_S390_IO_ADAPTERS);
2468*4882a593Smuzhiyun
2469*4882a593Smuzhiyun if (dev->kvm->arch.adapters[adapter_info.id] != NULL)
2470*4882a593Smuzhiyun return -EINVAL;
2471*4882a593Smuzhiyun
2472*4882a593Smuzhiyun adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2473*4882a593Smuzhiyun if (!adapter)
2474*4882a593Smuzhiyun return -ENOMEM;
2475*4882a593Smuzhiyun
2476*4882a593Smuzhiyun adapter->id = adapter_info.id;
2477*4882a593Smuzhiyun adapter->isc = adapter_info.isc;
2478*4882a593Smuzhiyun adapter->maskable = adapter_info.maskable;
2479*4882a593Smuzhiyun adapter->masked = false;
2480*4882a593Smuzhiyun adapter->swap = adapter_info.swap;
2481*4882a593Smuzhiyun adapter->suppressible = (adapter_info.flags) &
2482*4882a593Smuzhiyun KVM_S390_ADAPTER_SUPPRESSIBLE;
2483*4882a593Smuzhiyun dev->kvm->arch.adapters[adapter->id] = adapter;
2484*4882a593Smuzhiyun
2485*4882a593Smuzhiyun return 0;
2486*4882a593Smuzhiyun }
2487*4882a593Smuzhiyun
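/*
 * Change the masked state of a maskable adapter.  Returns the previous
 * masked state (0 or 1), or -EINVAL if the adapter does not exist or is
 * not maskable.
 */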
2488*4882a593Smuzhiyun int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
2489*4882a593Smuzhiyun {
2490*4882a593Smuzhiyun int ret;
2491*4882a593Smuzhiyun struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2492*4882a593Smuzhiyun
2493*4882a593Smuzhiyun if (!adapter || !adapter->maskable)
2494*4882a593Smuzhiyun return -EINVAL;
2495*4882a593Smuzhiyun ret = adapter->masked;
2496*4882a593Smuzhiyun adapter->masked = masked;
2497*4882a593Smuzhiyun return ret;
2498*4882a593Smuzhiyun }
2499*4882a593Smuzhiyun
2500*4882a593Smuzhiyun void kvm_s390_destroy_adapters(struct kvm *kvm)
2501*4882a593Smuzhiyun {
2502*4882a593Smuzhiyun int i;
2503*4882a593Smuzhiyun
2504*4882a593Smuzhiyun for (i = 0; i < MAX_S390_IO_ADAPTERS; i++)
2505*4882a593Smuzhiyun kfree(kvm->arch.adapters[i]);
2506*4882a593Smuzhiyun }
2507*4882a593Smuzhiyun
2508*4882a593Smuzhiyun static int modify_io_adapter(struct kvm_device *dev,
2509*4882a593Smuzhiyun struct kvm_device_attr *attr)
2510*4882a593Smuzhiyun {
2511*4882a593Smuzhiyun struct kvm_s390_io_adapter_req req;
2512*4882a593Smuzhiyun struct s390_io_adapter *adapter;
2513*4882a593Smuzhiyun int ret;
2514*4882a593Smuzhiyun
2515*4882a593Smuzhiyun if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
2516*4882a593Smuzhiyun return -EFAULT;
2517*4882a593Smuzhiyun
2518*4882a593Smuzhiyun adapter = get_io_adapter(dev->kvm, req.id);
2519*4882a593Smuzhiyun if (!adapter)
2520*4882a593Smuzhiyun return -EINVAL;
2521*4882a593Smuzhiyun switch (req.type) {
2522*4882a593Smuzhiyun case KVM_S390_IO_ADAPTER_MASK:
2523*4882a593Smuzhiyun ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
2524*4882a593Smuzhiyun if (ret > 0)
2525*4882a593Smuzhiyun ret = 0;
2526*4882a593Smuzhiyun break;
2527*4882a593Smuzhiyun /*
2528*4882a593Smuzhiyun * The following operations are no longer needed and therefore no-ops.
2529*4882a593Smuzhiyun * The gpa to hva translation is done when an IRQ route is set up. The
2530*4882a593Smuzhiyun * set_irq code uses get_user_pages_remote() to do the actual write.
2531*4882a593Smuzhiyun */
2532*4882a593Smuzhiyun case KVM_S390_IO_ADAPTER_MAP:
2533*4882a593Smuzhiyun case KVM_S390_IO_ADAPTER_UNMAP:
2534*4882a593Smuzhiyun ret = 0;
2535*4882a593Smuzhiyun break;
2536*4882a593Smuzhiyun default:
2537*4882a593Smuzhiyun ret = -EINVAL;
2538*4882a593Smuzhiyun }
2539*4882a593Smuzhiyun
2540*4882a593Smuzhiyun return ret;
2541*4882a593Smuzhiyun }
2542*4882a593Smuzhiyun
2543*4882a593Smuzhiyun static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
2544*4882a593Smuzhiyun
2545*4882a593Smuzhiyun {
2546*4882a593Smuzhiyun const u64 isc_mask = 0xffUL << 24; /* all iscs set */
2547*4882a593Smuzhiyun u32 schid;
2548*4882a593Smuzhiyun
2549*4882a593Smuzhiyun if (attr->flags)
2550*4882a593Smuzhiyun return -EINVAL;
2551*4882a593Smuzhiyun if (attr->attr != sizeof(schid))
2552*4882a593Smuzhiyun return -EINVAL;
2553*4882a593Smuzhiyun if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
2554*4882a593Smuzhiyun return -EFAULT;
2555*4882a593Smuzhiyun if (!schid)
2556*4882a593Smuzhiyun return -EINVAL;
2557*4882a593Smuzhiyun kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
2558*4882a593Smuzhiyun /*
2559*4882a593Smuzhiyun * If userspace is conforming to the architecture, we can have at most
2560*4882a593Smuzhiyun * one pending I/O interrupt per subchannel, so this is effectively a
2561*4882a593Smuzhiyun * clear all.
2562*4882a593Smuzhiyun */
2563*4882a593Smuzhiyun return 0;
2564*4882a593Smuzhiyun }
2565*4882a593Smuzhiyun
2566*4882a593Smuzhiyun static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
2567*4882a593Smuzhiyun {
2568*4882a593Smuzhiyun struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2569*4882a593Smuzhiyun struct kvm_s390_ais_req req;
2570*4882a593Smuzhiyun int ret = 0;
2571*4882a593Smuzhiyun
2572*4882a593Smuzhiyun if (!test_kvm_facility(kvm, 72))
2573*4882a593Smuzhiyun return -EOPNOTSUPP;
2574*4882a593Smuzhiyun
2575*4882a593Smuzhiyun if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
2576*4882a593Smuzhiyun return -EFAULT;
2577*4882a593Smuzhiyun
2578*4882a593Smuzhiyun if (req.isc > MAX_ISC)
2579*4882a593Smuzhiyun return -EINVAL;
2580*4882a593Smuzhiyun
2581*4882a593Smuzhiyun trace_kvm_s390_modify_ais_mode(req.isc,
2582*4882a593Smuzhiyun (fi->simm & AIS_MODE_MASK(req.isc)) ?
2583*4882a593Smuzhiyun (fi->nimm & AIS_MODE_MASK(req.isc)) ?
2584*4882a593Smuzhiyun 2 : KVM_S390_AIS_MODE_SINGLE :
2585*4882a593Smuzhiyun KVM_S390_AIS_MODE_ALL, req.mode);
2586*4882a593Smuzhiyun
2587*4882a593Smuzhiyun mutex_lock(&fi->ais_lock);
2588*4882a593Smuzhiyun switch (req.mode) {
2589*4882a593Smuzhiyun case KVM_S390_AIS_MODE_ALL:
2590*4882a593Smuzhiyun fi->simm &= ~AIS_MODE_MASK(req.isc);
2591*4882a593Smuzhiyun fi->nimm &= ~AIS_MODE_MASK(req.isc);
2592*4882a593Smuzhiyun break;
2593*4882a593Smuzhiyun case KVM_S390_AIS_MODE_SINGLE:
2594*4882a593Smuzhiyun fi->simm |= AIS_MODE_MASK(req.isc);
2595*4882a593Smuzhiyun fi->nimm &= ~AIS_MODE_MASK(req.isc);
2596*4882a593Smuzhiyun break;
2597*4882a593Smuzhiyun default:
2598*4882a593Smuzhiyun ret = -EINVAL;
2599*4882a593Smuzhiyun }
2600*4882a593Smuzhiyun mutex_unlock(&fi->ais_lock);
2601*4882a593Smuzhiyun
2602*4882a593Smuzhiyun return ret;
2603*4882a593Smuzhiyun }
2604*4882a593Smuzhiyun
2605*4882a593Smuzhiyun static int kvm_s390_inject_airq(struct kvm *kvm,
2606*4882a593Smuzhiyun struct s390_io_adapter *adapter)
2607*4882a593Smuzhiyun {
2608*4882a593Smuzhiyun struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2609*4882a593Smuzhiyun struct kvm_s390_interrupt s390int = {
2610*4882a593Smuzhiyun .type = KVM_S390_INT_IO(1, 0, 0, 0),
2611*4882a593Smuzhiyun .parm = 0,
2612*4882a593Smuzhiyun .parm64 = isc_to_int_word(adapter->isc),
2613*4882a593Smuzhiyun };
2614*4882a593Smuzhiyun int ret = 0;
2615*4882a593Smuzhiyun
2616*4882a593Smuzhiyun if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
2617*4882a593Smuzhiyun return kvm_s390_inject_vm(kvm, &s390int);
2618*4882a593Smuzhiyun
2619*4882a593Smuzhiyun mutex_lock(&fi->ais_lock);
2620*4882a593Smuzhiyun if (fi->nimm & AIS_MODE_MASK(adapter->isc)) {
2621*4882a593Smuzhiyun trace_kvm_s390_airq_suppressed(adapter->id, adapter->isc);
2622*4882a593Smuzhiyun goto out;
2623*4882a593Smuzhiyun }
2624*4882a593Smuzhiyun
2625*4882a593Smuzhiyun ret = kvm_s390_inject_vm(kvm, &s390int);
2626*4882a593Smuzhiyun if (!ret && (fi->simm & AIS_MODE_MASK(adapter->isc))) {
2627*4882a593Smuzhiyun fi->nimm |= AIS_MODE_MASK(adapter->isc);
2628*4882a593Smuzhiyun trace_kvm_s390_modify_ais_mode(adapter->isc,
2629*4882a593Smuzhiyun KVM_S390_AIS_MODE_SINGLE, 2);
2630*4882a593Smuzhiyun }
2631*4882a593Smuzhiyun out:
2632*4882a593Smuzhiyun mutex_unlock(&fi->ais_lock);
2633*4882a593Smuzhiyun return ret;
2634*4882a593Smuzhiyun }
2635*4882a593Smuzhiyun
2636*4882a593Smuzhiyun static int flic_inject_airq(struct kvm *kvm, struct kvm_device_attr *attr)
2637*4882a593Smuzhiyun {
2638*4882a593Smuzhiyun unsigned int id = attr->attr;
2639*4882a593Smuzhiyun struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2640*4882a593Smuzhiyun
2641*4882a593Smuzhiyun if (!adapter)
2642*4882a593Smuzhiyun return -EINVAL;
2643*4882a593Smuzhiyun
2644*4882a593Smuzhiyun return kvm_s390_inject_airq(kvm, adapter);
2645*4882a593Smuzhiyun }
2646*4882a593Smuzhiyun
2647*4882a593Smuzhiyun static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr)
2648*4882a593Smuzhiyun {
2649*4882a593Smuzhiyun struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2650*4882a593Smuzhiyun struct kvm_s390_ais_all ais;
2651*4882a593Smuzhiyun
2652*4882a593Smuzhiyun if (!test_kvm_facility(kvm, 72))
2653*4882a593Smuzhiyun return -EOPNOTSUPP;
2654*4882a593Smuzhiyun
2655*4882a593Smuzhiyun if (copy_from_user(&ais, (void __user *)attr->addr, sizeof(ais)))
2656*4882a593Smuzhiyun return -EFAULT;
2657*4882a593Smuzhiyun
2658*4882a593Smuzhiyun mutex_lock(&fi->ais_lock);
2659*4882a593Smuzhiyun fi->simm = ais.simm;
2660*4882a593Smuzhiyun fi->nimm = ais.nimm;
2661*4882a593Smuzhiyun mutex_unlock(&fi->ais_lock);
2662*4882a593Smuzhiyun
2663*4882a593Smuzhiyun return 0;
2664*4882a593Smuzhiyun }
2665*4882a593Smuzhiyun
2666*4882a593Smuzhiyun static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2667*4882a593Smuzhiyun {
2668*4882a593Smuzhiyun int r = 0;
2669*4882a593Smuzhiyun unsigned int i;
2670*4882a593Smuzhiyun struct kvm_vcpu *vcpu;
2671*4882a593Smuzhiyun
2672*4882a593Smuzhiyun switch (attr->group) {
2673*4882a593Smuzhiyun case KVM_DEV_FLIC_ENQUEUE:
2674*4882a593Smuzhiyun r = enqueue_floating_irq(dev, attr);
2675*4882a593Smuzhiyun break;
2676*4882a593Smuzhiyun case KVM_DEV_FLIC_CLEAR_IRQS:
2677*4882a593Smuzhiyun kvm_s390_clear_float_irqs(dev->kvm);
2678*4882a593Smuzhiyun break;
2679*4882a593Smuzhiyun case KVM_DEV_FLIC_APF_ENABLE:
2680*4882a593Smuzhiyun dev->kvm->arch.gmap->pfault_enabled = 1;
2681*4882a593Smuzhiyun break;
2682*4882a593Smuzhiyun case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2683*4882a593Smuzhiyun dev->kvm->arch.gmap->pfault_enabled = 0;
2684*4882a593Smuzhiyun /*
2685*4882a593Smuzhiyun * Make sure no async faults are in transition when
2686*4882a593Smuzhiyun * clearing the queues, so that we do not need to worry
2687*4882a593Smuzhiyun * about workers that complete late.
2688*4882a593Smuzhiyun */
2689*4882a593Smuzhiyun synchronize_srcu(&dev->kvm->srcu);
2690*4882a593Smuzhiyun kvm_for_each_vcpu(i, vcpu, dev->kvm)
2691*4882a593Smuzhiyun kvm_clear_async_pf_completion_queue(vcpu);
2692*4882a593Smuzhiyun break;
2693*4882a593Smuzhiyun case KVM_DEV_FLIC_ADAPTER_REGISTER:
2694*4882a593Smuzhiyun r = register_io_adapter(dev, attr);
2695*4882a593Smuzhiyun break;
2696*4882a593Smuzhiyun case KVM_DEV_FLIC_ADAPTER_MODIFY:
2697*4882a593Smuzhiyun r = modify_io_adapter(dev, attr);
2698*4882a593Smuzhiyun break;
2699*4882a593Smuzhiyun case KVM_DEV_FLIC_CLEAR_IO_IRQ:
2700*4882a593Smuzhiyun r = clear_io_irq(dev->kvm, attr);
2701*4882a593Smuzhiyun break;
2702*4882a593Smuzhiyun case KVM_DEV_FLIC_AISM:
2703*4882a593Smuzhiyun r = modify_ais_mode(dev->kvm, attr);
2704*4882a593Smuzhiyun break;
2705*4882a593Smuzhiyun case KVM_DEV_FLIC_AIRQ_INJECT:
2706*4882a593Smuzhiyun r = flic_inject_airq(dev->kvm, attr);
2707*4882a593Smuzhiyun break;
2708*4882a593Smuzhiyun case KVM_DEV_FLIC_AISM_ALL:
2709*4882a593Smuzhiyun r = flic_ais_mode_set_all(dev->kvm, attr);
2710*4882a593Smuzhiyun break;
2711*4882a593Smuzhiyun default:
2712*4882a593Smuzhiyun r = -EINVAL;
2713*4882a593Smuzhiyun }
2714*4882a593Smuzhiyun
2715*4882a593Smuzhiyun return r;
2716*4882a593Smuzhiyun }
2717*4882a593Smuzhiyun
2718*4882a593Smuzhiyun static int flic_has_attr(struct kvm_device *dev,
2719*4882a593Smuzhiyun struct kvm_device_attr *attr)
2720*4882a593Smuzhiyun {
2721*4882a593Smuzhiyun switch (attr->group) {
2722*4882a593Smuzhiyun case KVM_DEV_FLIC_GET_ALL_IRQS:
2723*4882a593Smuzhiyun case KVM_DEV_FLIC_ENQUEUE:
2724*4882a593Smuzhiyun case KVM_DEV_FLIC_CLEAR_IRQS:
2725*4882a593Smuzhiyun case KVM_DEV_FLIC_APF_ENABLE:
2726*4882a593Smuzhiyun case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2727*4882a593Smuzhiyun case KVM_DEV_FLIC_ADAPTER_REGISTER:
2728*4882a593Smuzhiyun case KVM_DEV_FLIC_ADAPTER_MODIFY:
2729*4882a593Smuzhiyun case KVM_DEV_FLIC_CLEAR_IO_IRQ:
2730*4882a593Smuzhiyun case KVM_DEV_FLIC_AISM:
2731*4882a593Smuzhiyun case KVM_DEV_FLIC_AIRQ_INJECT:
2732*4882a593Smuzhiyun case KVM_DEV_FLIC_AISM_ALL:
2733*4882a593Smuzhiyun return 0;
2734*4882a593Smuzhiyun }
2735*4882a593Smuzhiyun return -ENXIO;
2736*4882a593Smuzhiyun }
2737*4882a593Smuzhiyun
2738*4882a593Smuzhiyun static int flic_create(struct kvm_device *dev, u32 type)
2739*4882a593Smuzhiyun {
2740*4882a593Smuzhiyun if (!dev)
2741*4882a593Smuzhiyun return -EINVAL;
2742*4882a593Smuzhiyun if (dev->kvm->arch.flic)
2743*4882a593Smuzhiyun return -EINVAL;
2744*4882a593Smuzhiyun dev->kvm->arch.flic = dev;
2745*4882a593Smuzhiyun return 0;
2746*4882a593Smuzhiyun }
2747*4882a593Smuzhiyun
2748*4882a593Smuzhiyun static void flic_destroy(struct kvm_device *dev)
2749*4882a593Smuzhiyun {
2750*4882a593Smuzhiyun dev->kvm->arch.flic = NULL;
2751*4882a593Smuzhiyun kfree(dev);
2752*4882a593Smuzhiyun }
2753*4882a593Smuzhiyun
2754*4882a593Smuzhiyun /* s390 floating irq controller (flic) */
2755*4882a593Smuzhiyun struct kvm_device_ops kvm_flic_ops = {
2756*4882a593Smuzhiyun .name = "kvm-flic",
2757*4882a593Smuzhiyun .get_attr = flic_get_attr,
2758*4882a593Smuzhiyun .set_attr = flic_set_attr,
2759*4882a593Smuzhiyun .has_attr = flic_has_attr,
2760*4882a593Smuzhiyun .create = flic_create,
2761*4882a593Smuzhiyun .destroy = flic_destroy,
2762*4882a593Smuzhiyun };
2763*4882a593Smuzhiyun
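/*
 * Convert an indicator address/bit pair into a bit number relative to
 * the containing page: the byte offset within the page times eight plus
 * the requested bit, with the bit position reversed within a 64-bit
 * word when the adapter uses the swapped bit ordering.
 */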
2764*4882a593Smuzhiyun static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
2765*4882a593Smuzhiyun {
2766*4882a593Smuzhiyun unsigned long bit;
2767*4882a593Smuzhiyun
2768*4882a593Smuzhiyun bit = bit_nr + (addr % PAGE_SIZE) * 8;
2769*4882a593Smuzhiyun
2770*4882a593Smuzhiyun return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
2771*4882a593Smuzhiyun }
2772*4882a593Smuzhiyun
2773*4882a593Smuzhiyun static struct page *get_map_page(struct kvm *kvm, u64 uaddr)
2774*4882a593Smuzhiyun {
2775*4882a593Smuzhiyun struct page *page = NULL;
2776*4882a593Smuzhiyun
2777*4882a593Smuzhiyun mmap_read_lock(kvm->mm);
2778*4882a593Smuzhiyun get_user_pages_remote(kvm->mm, uaddr, 1, FOLL_WRITE,
2779*4882a593Smuzhiyun &page, NULL, NULL);
2780*4882a593Smuzhiyun mmap_read_unlock(kvm->mm);
2781*4882a593Smuzhiyun return page;
2782*4882a593Smuzhiyun }
2783*4882a593Smuzhiyun
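/*
 * Set the adapter-local indicator bit and the summary indicator bit in
 * guest memory through their userspace mappings.  Returns 1 if the
 * summary bit was newly set (an adapter interrupt should be injected),
 * 0 if it was already set (the interrupt is coalesced) and -1 if one of
 * the pages could not be pinned.
 */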
2784*4882a593Smuzhiyun static int adapter_indicators_set(struct kvm *kvm,
2785*4882a593Smuzhiyun struct s390_io_adapter *adapter,
2786*4882a593Smuzhiyun struct kvm_s390_adapter_int *adapter_int)
2787*4882a593Smuzhiyun {
2788*4882a593Smuzhiyun unsigned long bit;
2789*4882a593Smuzhiyun int summary_set, idx;
2790*4882a593Smuzhiyun struct page *ind_page, *summary_page;
2791*4882a593Smuzhiyun void *map;
2792*4882a593Smuzhiyun
2793*4882a593Smuzhiyun ind_page = get_map_page(kvm, adapter_int->ind_addr);
2794*4882a593Smuzhiyun if (!ind_page)
2795*4882a593Smuzhiyun return -1;
2796*4882a593Smuzhiyun summary_page = get_map_page(kvm, adapter_int->summary_addr);
2797*4882a593Smuzhiyun if (!summary_page) {
2798*4882a593Smuzhiyun put_page(ind_page);
2799*4882a593Smuzhiyun return -1;
2800*4882a593Smuzhiyun }
2801*4882a593Smuzhiyun
2802*4882a593Smuzhiyun idx = srcu_read_lock(&kvm->srcu);
2803*4882a593Smuzhiyun map = page_address(ind_page);
2804*4882a593Smuzhiyun bit = get_ind_bit(adapter_int->ind_addr,
2805*4882a593Smuzhiyun adapter_int->ind_offset, adapter->swap);
2806*4882a593Smuzhiyun set_bit(bit, map);
2807*4882a593Smuzhiyun mark_page_dirty(kvm, adapter_int->ind_addr >> PAGE_SHIFT);
2808*4882a593Smuzhiyun set_page_dirty_lock(ind_page);
2809*4882a593Smuzhiyun map = page_address(summary_page);
2810*4882a593Smuzhiyun bit = get_ind_bit(adapter_int->summary_addr,
2811*4882a593Smuzhiyun adapter_int->summary_offset, adapter->swap);
2812*4882a593Smuzhiyun summary_set = test_and_set_bit(bit, map);
2813*4882a593Smuzhiyun mark_page_dirty(kvm, adapter_int->summary_addr >> PAGE_SHIFT);
2814*4882a593Smuzhiyun set_page_dirty_lock(summary_page);
2815*4882a593Smuzhiyun srcu_read_unlock(&kvm->srcu, idx);
2816*4882a593Smuzhiyun
2817*4882a593Smuzhiyun put_page(ind_page);
2818*4882a593Smuzhiyun put_page(summary_page);
2819*4882a593Smuzhiyun return summary_set ? 0 : 1;
2820*4882a593Smuzhiyun }
2821*4882a593Smuzhiyun
2822*4882a593Smuzhiyun /*
2823*4882a593Smuzhiyun * < 0 - not injected due to error
2824*4882a593Smuzhiyun * = 0 - coalesced, summary indicator already active
2825*4882a593Smuzhiyun * > 0 - injected interrupt
2826*4882a593Smuzhiyun */
2827*4882a593Smuzhiyun static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
2828*4882a593Smuzhiyun struct kvm *kvm, int irq_source_id, int level,
2829*4882a593Smuzhiyun bool line_status)
2830*4882a593Smuzhiyun {
2831*4882a593Smuzhiyun int ret;
2832*4882a593Smuzhiyun struct s390_io_adapter *adapter;
2833*4882a593Smuzhiyun
2834*4882a593Smuzhiyun /* We're only interested in the 0->1 transition. */
2835*4882a593Smuzhiyun if (!level)
2836*4882a593Smuzhiyun return 0;
2837*4882a593Smuzhiyun adapter = get_io_adapter(kvm, e->adapter.adapter_id);
2838*4882a593Smuzhiyun if (!adapter)
2839*4882a593Smuzhiyun return -1;
2840*4882a593Smuzhiyun ret = adapter_indicators_set(kvm, adapter, &e->adapter);
2841*4882a593Smuzhiyun if ((ret > 0) && !adapter->masked) {
2842*4882a593Smuzhiyun ret = kvm_s390_inject_airq(kvm, adapter);
2843*4882a593Smuzhiyun if (ret == 0)
2844*4882a593Smuzhiyun ret = 1;
2845*4882a593Smuzhiyun }
2846*4882a593Smuzhiyun return ret;
2847*4882a593Smuzhiyun }
2848*4882a593Smuzhiyun
2849*4882a593Smuzhiyun /*
2850*4882a593Smuzhiyun * Inject the machine check to the guest.
2851*4882a593Smuzhiyun */
2852*4882a593Smuzhiyun void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
2853*4882a593Smuzhiyun struct mcck_volatile_info *mcck_info)
2854*4882a593Smuzhiyun {
2855*4882a593Smuzhiyun struct kvm_s390_interrupt_info inti;
2856*4882a593Smuzhiyun struct kvm_s390_irq irq;
2857*4882a593Smuzhiyun struct kvm_s390_mchk_info *mchk;
2858*4882a593Smuzhiyun union mci mci;
2859*4882a593Smuzhiyun __u64 cr14 = 0; /* upper bits are not used */
2860*4882a593Smuzhiyun int rc;
2861*4882a593Smuzhiyun
2862*4882a593Smuzhiyun mci.val = mcck_info->mcic;
2863*4882a593Smuzhiyun if (mci.sr)
2864*4882a593Smuzhiyun cr14 |= CR14_RECOVERY_SUBMASK;
2865*4882a593Smuzhiyun if (mci.dg)
2866*4882a593Smuzhiyun cr14 |= CR14_DEGRADATION_SUBMASK;
2867*4882a593Smuzhiyun if (mci.w)
2868*4882a593Smuzhiyun cr14 |= CR14_WARNING_SUBMASK;
2869*4882a593Smuzhiyun
2870*4882a593Smuzhiyun mchk = mci.ck ? &inti.mchk : &irq.u.mchk;
2871*4882a593Smuzhiyun mchk->cr14 = cr14;
2872*4882a593Smuzhiyun mchk->mcic = mcck_info->mcic;
2873*4882a593Smuzhiyun mchk->ext_damage_code = mcck_info->ext_damage_code;
2874*4882a593Smuzhiyun mchk->failing_storage_address = mcck_info->failing_storage_address;
2875*4882a593Smuzhiyun if (mci.ck) {
2876*4882a593Smuzhiyun /* Inject the floating machine check */
2877*4882a593Smuzhiyun inti.type = KVM_S390_MCHK;
2878*4882a593Smuzhiyun rc = __inject_vm(vcpu->kvm, &inti);
2879*4882a593Smuzhiyun } else {
2880*4882a593Smuzhiyun /* Inject the machine check to the specified vcpu */
2881*4882a593Smuzhiyun irq.type = KVM_S390_MCHK;
2882*4882a593Smuzhiyun rc = kvm_s390_inject_vcpu(vcpu, &irq);
2883*4882a593Smuzhiyun }
2884*4882a593Smuzhiyun WARN_ON_ONCE(rc);
2885*4882a593Smuzhiyun }
2886*4882a593Smuzhiyun
2887*4882a593Smuzhiyun int kvm_set_routing_entry(struct kvm *kvm,
2888*4882a593Smuzhiyun struct kvm_kernel_irq_routing_entry *e,
2889*4882a593Smuzhiyun const struct kvm_irq_routing_entry *ue)
2890*4882a593Smuzhiyun {
2891*4882a593Smuzhiyun u64 uaddr;
2892*4882a593Smuzhiyun
2893*4882a593Smuzhiyun switch (ue->type) {
2894*4882a593Smuzhiyun /* we store the userspace addresses instead of the guest addresses */
2895*4882a593Smuzhiyun case KVM_IRQ_ROUTING_S390_ADAPTER:
2896*4882a593Smuzhiyun e->set = set_adapter_int;
2897*4882a593Smuzhiyun uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.summary_addr);
2898*4882a593Smuzhiyun if (uaddr == -EFAULT)
2899*4882a593Smuzhiyun return -EFAULT;
2900*4882a593Smuzhiyun e->adapter.summary_addr = uaddr;
2901*4882a593Smuzhiyun uaddr = gmap_translate(kvm->arch.gmap, ue->u.adapter.ind_addr);
2902*4882a593Smuzhiyun if (uaddr == -EFAULT)
2903*4882a593Smuzhiyun return -EFAULT;
2904*4882a593Smuzhiyun e->adapter.ind_addr = uaddr;
2905*4882a593Smuzhiyun e->adapter.summary_offset = ue->u.adapter.summary_offset;
2906*4882a593Smuzhiyun e->adapter.ind_offset = ue->u.adapter.ind_offset;
2907*4882a593Smuzhiyun e->adapter.adapter_id = ue->u.adapter.adapter_id;
2908*4882a593Smuzhiyun return 0;
2909*4882a593Smuzhiyun default:
2910*4882a593Smuzhiyun return -EINVAL;
2911*4882a593Smuzhiyun }
2912*4882a593Smuzhiyun }
2913*4882a593Smuzhiyun
2914*4882a593Smuzhiyun int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
2915*4882a593Smuzhiyun int irq_source_id, int level, bool line_status)
2916*4882a593Smuzhiyun {
2917*4882a593Smuzhiyun return -EINVAL;
2918*4882a593Smuzhiyun }
2919*4882a593Smuzhiyun
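/*
 * Restore the local interrupt state of a vcpu from a userspace buffer
 * holding an array of struct kvm_s390_irq (typically reached via the
 * KVM_S390_SET_IRQ_STATE vcpu ioctl, e.g. on migration).  Fails with
 * -EBUSY if the vcpu already has interrupts pending.
 */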
2920*4882a593Smuzhiyun int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
2921*4882a593Smuzhiyun {
2922*4882a593Smuzhiyun struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2923*4882a593Smuzhiyun struct kvm_s390_irq *buf;
2924*4882a593Smuzhiyun int r = 0;
2925*4882a593Smuzhiyun int n;
2926*4882a593Smuzhiyun
2927*4882a593Smuzhiyun buf = vmalloc(len);
2928*4882a593Smuzhiyun if (!buf)
2929*4882a593Smuzhiyun return -ENOMEM;
2930*4882a593Smuzhiyun
2931*4882a593Smuzhiyun if (copy_from_user((void *) buf, irqstate, len)) {
2932*4882a593Smuzhiyun r = -EFAULT;
2933*4882a593Smuzhiyun goto out_free;
2934*4882a593Smuzhiyun }
2935*4882a593Smuzhiyun
2936*4882a593Smuzhiyun /*
2937*4882a593Smuzhiyun * Don't allow setting the interrupt state
2938*4882a593Smuzhiyun * when there are already interrupts pending
2939*4882a593Smuzhiyun */
2940*4882a593Smuzhiyun spin_lock(&li->lock);
2941*4882a593Smuzhiyun if (li->pending_irqs) {
2942*4882a593Smuzhiyun r = -EBUSY;
2943*4882a593Smuzhiyun goto out_unlock;
2944*4882a593Smuzhiyun }
2945*4882a593Smuzhiyun
2946*4882a593Smuzhiyun for (n = 0; n < len / sizeof(*buf); n++) {
2947*4882a593Smuzhiyun r = do_inject_vcpu(vcpu, &buf[n]);
2948*4882a593Smuzhiyun if (r)
2949*4882a593Smuzhiyun break;
2950*4882a593Smuzhiyun }
2951*4882a593Smuzhiyun
2952*4882a593Smuzhiyun out_unlock:
2953*4882a593Smuzhiyun spin_unlock(&li->lock);
2954*4882a593Smuzhiyun out_free:
2955*4882a593Smuzhiyun vfree(buf);
2956*4882a593Smuzhiyun
2957*4882a593Smuzhiyun return r;
2958*4882a593Smuzhiyun }
2959*4882a593Smuzhiyun
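/*
 * Translate one pending-irq bit of the local interrupt state into its
 * uapi struct kvm_s390_irq representation for kvm_s390_get_irq_state().
 */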
2960*4882a593Smuzhiyun static void store_local_irq(struct kvm_s390_local_interrupt *li,
2961*4882a593Smuzhiyun struct kvm_s390_irq *irq,
2962*4882a593Smuzhiyun unsigned long irq_type)
2963*4882a593Smuzhiyun {
2964*4882a593Smuzhiyun switch (irq_type) {
2965*4882a593Smuzhiyun case IRQ_PEND_MCHK_EX:
2966*4882a593Smuzhiyun case IRQ_PEND_MCHK_REP:
2967*4882a593Smuzhiyun irq->type = KVM_S390_MCHK;
2968*4882a593Smuzhiyun irq->u.mchk = li->irq.mchk;
2969*4882a593Smuzhiyun break;
2970*4882a593Smuzhiyun case IRQ_PEND_PROG:
2971*4882a593Smuzhiyun irq->type = KVM_S390_PROGRAM_INT;
2972*4882a593Smuzhiyun irq->u.pgm = li->irq.pgm;
2973*4882a593Smuzhiyun break;
2974*4882a593Smuzhiyun case IRQ_PEND_PFAULT_INIT:
2975*4882a593Smuzhiyun irq->type = KVM_S390_INT_PFAULT_INIT;
2976*4882a593Smuzhiyun irq->u.ext = li->irq.ext;
2977*4882a593Smuzhiyun break;
2978*4882a593Smuzhiyun case IRQ_PEND_EXT_EXTERNAL:
2979*4882a593Smuzhiyun irq->type = KVM_S390_INT_EXTERNAL_CALL;
2980*4882a593Smuzhiyun irq->u.extcall = li->irq.extcall;
2981*4882a593Smuzhiyun break;
2982*4882a593Smuzhiyun case IRQ_PEND_EXT_CLOCK_COMP:
2983*4882a593Smuzhiyun irq->type = KVM_S390_INT_CLOCK_COMP;
2984*4882a593Smuzhiyun break;
2985*4882a593Smuzhiyun case IRQ_PEND_EXT_CPU_TIMER:
2986*4882a593Smuzhiyun irq->type = KVM_S390_INT_CPU_TIMER;
2987*4882a593Smuzhiyun break;
2988*4882a593Smuzhiyun case IRQ_PEND_SIGP_STOP:
2989*4882a593Smuzhiyun irq->type = KVM_S390_SIGP_STOP;
2990*4882a593Smuzhiyun irq->u.stop = li->irq.stop;
2991*4882a593Smuzhiyun break;
2992*4882a593Smuzhiyun case IRQ_PEND_RESTART:
2993*4882a593Smuzhiyun irq->type = KVM_S390_RESTART;
2994*4882a593Smuzhiyun break;
2995*4882a593Smuzhiyun case IRQ_PEND_SET_PREFIX:
2996*4882a593Smuzhiyun irq->type = KVM_S390_SIGP_SET_PREFIX;
2997*4882a593Smuzhiyun irq->u.prefix = li->irq.prefix;
2998*4882a593Smuzhiyun break;
2999*4882a593Smuzhiyun }
3000*4882a593Smuzhiyun }
3001*4882a593Smuzhiyun
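/*
 * Write the pending local interrupts of a vcpu into a userspace buffer:
 * one struct kvm_s390_irq per pending type, with emergency signals
 * expanded per signalling cpu address and an external call taken from
 * the SCA appended last.  Returns the number of bytes written, -ENOBUFS
 * if the buffer is too small or -EFAULT on a failed copy-out.
 */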
3002*4882a593Smuzhiyun int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
3003*4882a593Smuzhiyun {
3004*4882a593Smuzhiyun int scn;
3005*4882a593Smuzhiyun DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
3006*4882a593Smuzhiyun struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
3007*4882a593Smuzhiyun unsigned long pending_irqs;
3008*4882a593Smuzhiyun struct kvm_s390_irq irq;
3009*4882a593Smuzhiyun unsigned long irq_type;
3010*4882a593Smuzhiyun int cpuaddr;
3011*4882a593Smuzhiyun int n = 0;
3012*4882a593Smuzhiyun
3013*4882a593Smuzhiyun spin_lock(&li->lock);
3014*4882a593Smuzhiyun pending_irqs = li->pending_irqs;
3015*4882a593Smuzhiyun memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
3016*4882a593Smuzhiyun sizeof(sigp_emerg_pending));
3017*4882a593Smuzhiyun spin_unlock(&li->lock);
3018*4882a593Smuzhiyun
3019*4882a593Smuzhiyun for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
3020*4882a593Smuzhiyun memset(&irq, 0, sizeof(irq));
3021*4882a593Smuzhiyun if (irq_type == IRQ_PEND_EXT_EMERGENCY)
3022*4882a593Smuzhiyun continue;
3023*4882a593Smuzhiyun if (n + sizeof(irq) > len)
3024*4882a593Smuzhiyun return -ENOBUFS;
3025*4882a593Smuzhiyun store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
3026*4882a593Smuzhiyun if (copy_to_user(&buf[n], &irq, sizeof(irq)))
3027*4882a593Smuzhiyun return -EFAULT;
3028*4882a593Smuzhiyun n += sizeof(irq);
3029*4882a593Smuzhiyun }
3030*4882a593Smuzhiyun
3031*4882a593Smuzhiyun if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
3032*4882a593Smuzhiyun for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
3033*4882a593Smuzhiyun memset(&irq, 0, sizeof(irq));
3034*4882a593Smuzhiyun if (n + sizeof(irq) > len)
3035*4882a593Smuzhiyun return -ENOBUFS;
3036*4882a593Smuzhiyun irq.type = KVM_S390_INT_EMERGENCY;
3037*4882a593Smuzhiyun irq.u.emerg.code = cpuaddr;
3038*4882a593Smuzhiyun if (copy_to_user(&buf[n], &irq, sizeof(irq)))
3039*4882a593Smuzhiyun return -EFAULT;
3040*4882a593Smuzhiyun n += sizeof(irq);
3041*4882a593Smuzhiyun }
3042*4882a593Smuzhiyun }
3043*4882a593Smuzhiyun
3044*4882a593Smuzhiyun if (sca_ext_call_pending(vcpu, &scn)) {
3045*4882a593Smuzhiyun if (n + sizeof(irq) > len)
3046*4882a593Smuzhiyun return -ENOBUFS;
3047*4882a593Smuzhiyun memset(&irq, 0, sizeof(irq));
3048*4882a593Smuzhiyun irq.type = KVM_S390_INT_EXTERNAL_CALL;
3049*4882a593Smuzhiyun irq.u.extcall.code = scn;
3050*4882a593Smuzhiyun if (copy_to_user(&buf[n], &irq, sizeof(irq)))
3051*4882a593Smuzhiyun return -EFAULT;
3052*4882a593Smuzhiyun n += sizeof(irq);
3053*4882a593Smuzhiyun }
3054*4882a593Smuzhiyun
3055*4882a593Smuzhiyun return n;
3056*4882a593Smuzhiyun }
3057*4882a593Smuzhiyun
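/*
 * Wake up at most one idle vcpu that has I/O interrupts enabled and
 * whose I/O interruption subclass mask (taken from CR6) intersects the
 * deliverable ISCs.  The kicked_mask bit prevents kicking a vcpu again
 * before it had a chance to run and consume the pending interrupts.
 */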
3058*4882a593Smuzhiyun static void __airqs_kick_single_vcpu(struct kvm *kvm, u8 deliverable_mask)
3059*4882a593Smuzhiyun {
3060*4882a593Smuzhiyun int vcpu_idx, online_vcpus = atomic_read(&kvm->online_vcpus);
3061*4882a593Smuzhiyun struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3062*4882a593Smuzhiyun struct kvm_vcpu *vcpu;
3063*4882a593Smuzhiyun u8 vcpu_isc_mask;
3064*4882a593Smuzhiyun
3065*4882a593Smuzhiyun for_each_set_bit(vcpu_idx, kvm->arch.idle_mask, online_vcpus) {
3066*4882a593Smuzhiyun vcpu = kvm_get_vcpu(kvm, vcpu_idx);
3067*4882a593Smuzhiyun if (psw_ioint_disabled(vcpu))
3068*4882a593Smuzhiyun continue;
3069*4882a593Smuzhiyun vcpu_isc_mask = (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
3070*4882a593Smuzhiyun if (deliverable_mask & vcpu_isc_mask) {
3071*4882a593Smuzhiyun /* already kicked but not yet running */
3072*4882a593Smuzhiyun if (test_and_set_bit(vcpu_idx, gi->kicked_mask))
3073*4882a593Smuzhiyun return;
3074*4882a593Smuzhiyun kvm_s390_vcpu_wakeup(vcpu);
3075*4882a593Smuzhiyun return;
3076*4882a593Smuzhiyun }
3077*4882a593Smuzhiyun }
3078*4882a593Smuzhiyun }
3079*4882a593Smuzhiyun
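/*
 * Expiry handler of the GISA alert timer: as long as interrupts are
 * still pending in the IPM, kick an idle vcpu and re-arm the timer;
 * otherwise the alert mask has been restored by
 * gisa_get_ipm_or_restore_iam() and the timer stops.
 */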
3080*4882a593Smuzhiyun static enum hrtimer_restart gisa_vcpu_kicker(struct hrtimer *timer)
3081*4882a593Smuzhiyun {
3082*4882a593Smuzhiyun struct kvm_s390_gisa_interrupt *gi =
3083*4882a593Smuzhiyun container_of(timer, struct kvm_s390_gisa_interrupt, timer);
3084*4882a593Smuzhiyun struct kvm *kvm =
3085*4882a593Smuzhiyun container_of(gi->origin, struct sie_page2, gisa)->kvm;
3086*4882a593Smuzhiyun u8 pending_mask;
3087*4882a593Smuzhiyun
3088*4882a593Smuzhiyun pending_mask = gisa_get_ipm_or_restore_iam(gi);
3089*4882a593Smuzhiyun if (pending_mask) {
3090*4882a593Smuzhiyun __airqs_kick_single_vcpu(kvm, pending_mask);
3091*4882a593Smuzhiyun hrtimer_forward_now(timer, ns_to_ktime(gi->expires));
3092*4882a593Smuzhiyun return HRTIMER_RESTART;
3093*4882a593Smuzhiyun }
3094*4882a593Smuzhiyun
3095*4882a593Smuzhiyun return HRTIMER_NORESTART;
3096*4882a593Smuzhiyun }
3097*4882a593Smuzhiyun
3098*4882a593Smuzhiyun #define NULL_GISA_ADDR 0x00000000UL
3099*4882a593Smuzhiyun #define NONE_GISA_ADDR 0x00000001UL
3100*4882a593Smuzhiyun #define GISA_ADDR_MASK 0xfffff000UL
3101*4882a593Smuzhiyun
3102*4882a593Smuzhiyun static void process_gib_alert_list(void)
3103*4882a593Smuzhiyun {
3104*4882a593Smuzhiyun struct kvm_s390_gisa_interrupt *gi;
3105*4882a593Smuzhiyun struct kvm_s390_gisa *gisa;
3106*4882a593Smuzhiyun struct kvm *kvm;
3107*4882a593Smuzhiyun u32 final, origin = 0UL;
3108*4882a593Smuzhiyun
3109*4882a593Smuzhiyun do {
3110*4882a593Smuzhiyun /*
3111*4882a593Smuzhiyun * If the NONE_GISA_ADDR is still stored in the alert list
3112*4882a593Smuzhiyun * origin, we will leave the outer loop. No further GISA has
3113*4882a593Smuzhiyun * been added to the alert list by millicode while processing
3114*4882a593Smuzhiyun * the current alert list.
3115*4882a593Smuzhiyun */
3116*4882a593Smuzhiyun final = (origin & NONE_GISA_ADDR);
3117*4882a593Smuzhiyun /*
3118*4882a593Smuzhiyun * Cut off the alert list and store the NONE_GISA_ADDR in the
3119*4882a593Smuzhiyun * alert list origin to avoid further GAL interruptions.
3120*4882a593Smuzhiyun * A new alert list can be built up by millicode in parallel
3121*4882a593Smuzhiyun * for guests not in the yet cut-off alert list. When in the
3122*4882a593Smuzhiyun * final loop, store the NULL_GISA_ADDR instead. This will re-
3123*4882a593Smuzhiyun * enable GAL interruptions on the host again.
3124*4882a593Smuzhiyun */
3125*4882a593Smuzhiyun origin = xchg(&gib->alert_list_origin,
3126*4882a593Smuzhiyun (!final) ? NONE_GISA_ADDR : NULL_GISA_ADDR);
3127*4882a593Smuzhiyun /*
3128*4882a593Smuzhiyun * Loop through the just cut-off alert list and start the
3129*4882a593Smuzhiyun * gisa timers to kick idle vcpus to consume the pending
3130*4882a593Smuzhiyun * interruptions asap.
3131*4882a593Smuzhiyun */
3132*4882a593Smuzhiyun while (origin & GISA_ADDR_MASK) {
3133*4882a593Smuzhiyun gisa = (struct kvm_s390_gisa *)(u64)origin;
3134*4882a593Smuzhiyun origin = gisa->next_alert;
3135*4882a593Smuzhiyun gisa->next_alert = (u32)(u64)gisa;
3136*4882a593Smuzhiyun kvm = container_of(gisa, struct sie_page2, gisa)->kvm;
3137*4882a593Smuzhiyun gi = &kvm->arch.gisa_int;
3138*4882a593Smuzhiyun if (hrtimer_active(&gi->timer))
3139*4882a593Smuzhiyun hrtimer_cancel(&gi->timer);
3140*4882a593Smuzhiyun hrtimer_start(&gi->timer, 0, HRTIMER_MODE_REL);
3141*4882a593Smuzhiyun }
3142*4882a593Smuzhiyun } while (!final);
3143*4882a593Smuzhiyun
3144*4882a593Smuzhiyun }
3145*4882a593Smuzhiyun
3146*4882a593Smuzhiyun void kvm_s390_gisa_clear(struct kvm *kvm)
3147*4882a593Smuzhiyun {
3148*4882a593Smuzhiyun struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3149*4882a593Smuzhiyun
3150*4882a593Smuzhiyun if (!gi->origin)
3151*4882a593Smuzhiyun return;
3152*4882a593Smuzhiyun gisa_clear_ipm(gi->origin);
3153*4882a593Smuzhiyun VM_EVENT(kvm, 3, "gisa 0x%pK cleared", gi->origin);
3154*4882a593Smuzhiyun }
3155*4882a593Smuzhiyun
3156*4882a593Smuzhiyun void kvm_s390_gisa_init(struct kvm *kvm)
3157*4882a593Smuzhiyun {
3158*4882a593Smuzhiyun struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3159*4882a593Smuzhiyun
3160*4882a593Smuzhiyun if (!css_general_characteristics.aiv)
3161*4882a593Smuzhiyun return;
3162*4882a593Smuzhiyun gi->origin = &kvm->arch.sie_page2->gisa;
3163*4882a593Smuzhiyun gi->alert.mask = 0;
3164*4882a593Smuzhiyun spin_lock_init(&gi->alert.ref_lock);
3165*4882a593Smuzhiyun gi->expires = 50 * 1000; /* 50 usec */
3166*4882a593Smuzhiyun hrtimer_init(&gi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3167*4882a593Smuzhiyun gi->timer.function = gisa_vcpu_kicker;
3168*4882a593Smuzhiyun memset(gi->origin, 0, sizeof(struct kvm_s390_gisa));
3169*4882a593Smuzhiyun gi->origin->next_alert = (u32)(u64)gi->origin;
3170*4882a593Smuzhiyun VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin);
3171*4882a593Smuzhiyun }
3172*4882a593Smuzhiyun
3173*4882a593Smuzhiyun void kvm_s390_gisa_destroy(struct kvm *kvm)
3174*4882a593Smuzhiyun {
3175*4882a593Smuzhiyun struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3176*4882a593Smuzhiyun
3177*4882a593Smuzhiyun if (!gi->origin)
3178*4882a593Smuzhiyun return;
3179*4882a593Smuzhiyun if (gi->alert.mask)
3180*4882a593Smuzhiyun KVM_EVENT(3, "vm 0x%pK has unexpected iam 0x%02x",
3181*4882a593Smuzhiyun kvm, gi->alert.mask);
3182*4882a593Smuzhiyun while (gisa_in_alert_list(gi->origin))
3183*4882a593Smuzhiyun cpu_relax();
3184*4882a593Smuzhiyun hrtimer_cancel(&gi->timer);
3185*4882a593Smuzhiyun gi->origin = NULL;
3186*4882a593Smuzhiyun }
3187*4882a593Smuzhiyun
3188*4882a593Smuzhiyun /**
3189*4882a593Smuzhiyun * kvm_s390_gisc_register - register a guest ISC
3190*4882a593Smuzhiyun *
3191*4882a593Smuzhiyun * @kvm: the kernel vm to work with
3192*4882a593Smuzhiyun * @gisc: the guest interruption sub class to register
3193*4882a593Smuzhiyun *
3194*4882a593Smuzhiyun * The function extends the vm-specific alert mask that is in use.
3195*4882a593Smuzhiyun * The effective IAM mask in the GISA is updated as well
3196*4882a593Smuzhiyun * in case the GISA is not part of the GIB alert list.
3197*4882a593Smuzhiyun * It will be updated latest when the IAM gets restored
3198*4882a593Smuzhiyun * by gisa_get_ipm_or_restore_iam().
3199*4882a593Smuzhiyun *
3200*4882a593Smuzhiyun * Returns: the nonspecific ISC (NISC) the gib alert mechanism
3201*4882a593Smuzhiyun * has registered with the channel subsystem.
3202*4882a593Smuzhiyun * -ENODEV in case the vm uses no GISA
3203*4882a593Smuzhiyun * -ERANGE in case the guest ISC is invalid
3204*4882a593Smuzhiyun */
3205*4882a593Smuzhiyun int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc)
3206*4882a593Smuzhiyun {
3207*4882a593Smuzhiyun struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3208*4882a593Smuzhiyun
3209*4882a593Smuzhiyun if (!gi->origin)
3210*4882a593Smuzhiyun return -ENODEV;
3211*4882a593Smuzhiyun if (gisc > MAX_ISC)
3212*4882a593Smuzhiyun return -ERANGE;
3213*4882a593Smuzhiyun
3214*4882a593Smuzhiyun spin_lock(&gi->alert.ref_lock);
3215*4882a593Smuzhiyun gi->alert.ref_count[gisc]++;
3216*4882a593Smuzhiyun if (gi->alert.ref_count[gisc] == 1) {
3217*4882a593Smuzhiyun gi->alert.mask |= 0x80 >> gisc;
3218*4882a593Smuzhiyun gisa_set_iam(gi->origin, gi->alert.mask);
3219*4882a593Smuzhiyun }
3220*4882a593Smuzhiyun spin_unlock(&gi->alert.ref_lock);
3221*4882a593Smuzhiyun
3222*4882a593Smuzhiyun return gib->nisc;
3223*4882a593Smuzhiyun }
3224*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(kvm_s390_gisc_register);
3225*4882a593Smuzhiyun
3226*4882a593Smuzhiyun /**
3227*4882a593Smuzhiyun * kvm_s390_gisc_unregister - unregister a guest ISC
3228*4882a593Smuzhiyun *
3229*4882a593Smuzhiyun * @kvm: the kernel vm to work with
3230*4882a593Smuzhiyun * @gisc: the guest interruption sub class to unregister
3231*4882a593Smuzhiyun *
3232*4882a593Smuzhiyun * The function reduces the vm-specific alert mask that is in use.
3233*4882a593Smuzhiyun * The effective IAM mask in the GISA is updated as well
3234*4882a593Smuzhiyun * in case the GISA is not part of the GIB alert list.
3235*4882a593Smuzhiyun * It will be updated latest when the IAM gets restored
3236*4882a593Smuzhiyun * by gisa_get_ipm_or_restore_iam().
3237*4882a593Smuzhiyun *
3238*4882a593Smuzhiyun * Returns: the nonspecific ISC (NISC) the gib alert mechanism
3239*4882a593Smuzhiyun * has registered with the channel subsystem.
3240*4882a593Smuzhiyun * -ENODEV in case the vm uses no GISA
3241*4882a593Smuzhiyun * -ERANGE in case the guest ISC is invalid
3242*4882a593Smuzhiyun * -EINVAL in case the guest ISC is not registered
3243*4882a593Smuzhiyun */
3244*4882a593Smuzhiyun int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc)
3245*4882a593Smuzhiyun {
3246*4882a593Smuzhiyun struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
3247*4882a593Smuzhiyun int rc = 0;
3248*4882a593Smuzhiyun
3249*4882a593Smuzhiyun if (!gi->origin)
3250*4882a593Smuzhiyun return -ENODEV;
3251*4882a593Smuzhiyun if (gisc > MAX_ISC)
3252*4882a593Smuzhiyun return -ERANGE;
3253*4882a593Smuzhiyun
3254*4882a593Smuzhiyun spin_lock(&gi->alert.ref_lock);
3255*4882a593Smuzhiyun if (gi->alert.ref_count[gisc] == 0) {
3256*4882a593Smuzhiyun rc = -EINVAL;
3257*4882a593Smuzhiyun goto out;
3258*4882a593Smuzhiyun }
3259*4882a593Smuzhiyun gi->alert.ref_count[gisc]--;
3260*4882a593Smuzhiyun if (gi->alert.ref_count[gisc] == 0) {
3261*4882a593Smuzhiyun gi->alert.mask &= ~(0x80 >> gisc);
3262*4882a593Smuzhiyun gisa_set_iam(gi->origin, gi->alert.mask);
3263*4882a593Smuzhiyun }
3264*4882a593Smuzhiyun out:
3265*4882a593Smuzhiyun spin_unlock(&gi->alert.ref_lock);
3266*4882a593Smuzhiyun
3267*4882a593Smuzhiyun return rc;
3268*4882a593Smuzhiyun }
3269*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(kvm_s390_gisc_unregister);
3270*4882a593Smuzhiyun
3271*4882a593Smuzhiyun static void gib_alert_irq_handler(struct airq_struct *airq, bool floating)
3272*4882a593Smuzhiyun {
3273*4882a593Smuzhiyun inc_irq_stat(IRQIO_GAL);
3274*4882a593Smuzhiyun process_gib_alert_list();
3275*4882a593Smuzhiyun }
3276*4882a593Smuzhiyun
3277*4882a593Smuzhiyun static struct airq_struct gib_alert_irq = {
3278*4882a593Smuzhiyun .handler = gib_alert_irq_handler,
3279*4882a593Smuzhiyun .lsi_ptr = &gib_alert_irq.lsi_mask,
3280*4882a593Smuzhiyun };
3281*4882a593Smuzhiyun
3282*4882a593Smuzhiyun void kvm_s390_gib_destroy(void)
3283*4882a593Smuzhiyun {
3284*4882a593Smuzhiyun if (!gib)
3285*4882a593Smuzhiyun return;
3286*4882a593Smuzhiyun chsc_sgib(0);
3287*4882a593Smuzhiyun unregister_adapter_interrupt(&gib_alert_irq);
3288*4882a593Smuzhiyun free_page((unsigned long)gib);
3289*4882a593Smuzhiyun gib = NULL;
3290*4882a593Smuzhiyun }
3291*4882a593Smuzhiyun
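/*
 * Allocate the guest information block (GIB) in DMA-capable memory,
 * register the GIB alert interruption handler for the given nonspecific
 * ISC and associate the GIB with the AIV facility via chsc_sgib().
 * Without the AIV facility this is a no-op that still returns 0.
 */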
3292*4882a593Smuzhiyun int kvm_s390_gib_init(u8 nisc)
3293*4882a593Smuzhiyun {
3294*4882a593Smuzhiyun int rc = 0;
3295*4882a593Smuzhiyun
3296*4882a593Smuzhiyun if (!css_general_characteristics.aiv) {
3297*4882a593Smuzhiyun KVM_EVENT(3, "%s", "gib not initialized, no AIV facility");
3298*4882a593Smuzhiyun goto out;
3299*4882a593Smuzhiyun }
3300*4882a593Smuzhiyun
3301*4882a593Smuzhiyun gib = (struct kvm_s390_gib *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
3302*4882a593Smuzhiyun if (!gib) {
3303*4882a593Smuzhiyun rc = -ENOMEM;
3304*4882a593Smuzhiyun goto out;
3305*4882a593Smuzhiyun }
3306*4882a593Smuzhiyun
3307*4882a593Smuzhiyun gib_alert_irq.isc = nisc;
3308*4882a593Smuzhiyun if (register_adapter_interrupt(&gib_alert_irq)) {
3309*4882a593Smuzhiyun pr_err("Registering the GIB alert interruption handler failed\n");
3310*4882a593Smuzhiyun rc = -EIO;
3311*4882a593Smuzhiyun goto out_free_gib;
3312*4882a593Smuzhiyun }
3313*4882a593Smuzhiyun
3314*4882a593Smuzhiyun gib->nisc = nisc;
3315*4882a593Smuzhiyun if (chsc_sgib((u32)(u64)gib)) {
3316*4882a593Smuzhiyun pr_err("Associating the GIB with the AIV facility failed\n");
3317*4882a593Smuzhiyun free_page((unsigned long)gib);
3318*4882a593Smuzhiyun gib = NULL;
3319*4882a593Smuzhiyun rc = -EIO;
3320*4882a593Smuzhiyun goto out_unreg_gal;
3321*4882a593Smuzhiyun }
3322*4882a593Smuzhiyun
3323*4882a593Smuzhiyun KVM_EVENT(3, "gib 0x%pK (nisc=%d) initialized", gib, gib->nisc);
3324*4882a593Smuzhiyun goto out;
3325*4882a593Smuzhiyun
3326*4882a593Smuzhiyun out_unreg_gal:
3327*4882a593Smuzhiyun unregister_adapter_interrupt(&gib_alert_irq);
3328*4882a593Smuzhiyun out_free_gib:
3329*4882a593Smuzhiyun free_page((unsigned long)gib);
3330*4882a593Smuzhiyun gib = NULL;
3331*4882a593Smuzhiyun out:
3332*4882a593Smuzhiyun return rc;
3333*4882a593Smuzhiyun }