/* xref: /OK3568_Linux_fs/kernel/arch/powerpc/kvm/booke.c (revision 4882a59341e53eb6f0b4789bf948001014eff981) */
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>
#include <asm/time.h>

#include "timing.h"
#include "booke.h"

#define CREATE_TRACE_POINTS
#include "trace_booke.h"

unsigned long kvmppc_booke_handlers;

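/*
 * Exit/event counters; the generic KVM debugfs code exposes each entry
 * as a file (VCPU_STAT values are aggregated across vcpus).
 */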
struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT("mmio", mmio_exits),
	VCPU_STAT("sig", signal_exits),
	VCPU_STAT("itlb_r", itlb_real_miss_exits),
	VCPU_STAT("itlb_v", itlb_virt_miss_exits),
	VCPU_STAT("dtlb_r", dtlb_real_miss_exits),
	VCPU_STAT("dtlb_v", dtlb_virt_miss_exits),
	VCPU_STAT("sysc", syscall_exits),
	VCPU_STAT("isi", isi_exits),
	VCPU_STAT("dsi", dsi_exits),
	VCPU_STAT("inst_emu", emulated_inst_exits),
	VCPU_STAT("dec", dec_exits),
	VCPU_STAT("ext_intr", ext_intr_exits),
	VCPU_STAT("halt_successful_poll", halt_successful_poll),
	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
	VCPU_STAT("halt_wakeup", halt_wakeup),
	VCPU_STAT("doorbell", dbell_exits),
	VCPU_STAT("guest doorbell", gdbell_exits),
	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
	VM_STAT("remote_tlb_flush", remote_tlb_flush),
	{ NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc:   %08lx msr:  %08llx\n", vcpu->arch.regs.nip,
			vcpu->arch.shared->msr);
	printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.regs.link,
			vcpu->arch.regs.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
					    vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}

#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	disable_kernel_spe();
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	disable_kernel_spe();
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif
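
/*
 * Note: shadow_msr is the MSR value actually presented to hardware
 * while the guest runs, so the sync above lazily grants or revokes the
 * real MSR[SPE] to match what the guest believes it owns.
 */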

/*
 * Load up guest vcpu FP state if it's needed.
 * It also sets MSR_FP in the thread so that the host knows we're
 * holding the FPU, and the host can then save the guest vcpu FP state
 * if other threads require the FPU.
 * This simulates an FP unavailable fault.
 *
 * It must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (!(current->thread.regs->msr & MSR_FP)) {
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		current->thread.fp_save_area = &vcpu->arch.fp;
		current->thread.regs->msr |= MSR_FP;
	}
#endif
}

/*
 * Save guest vcpu FP state into thread.
 * It must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	current->thread.fp_save_area = NULL;
#endif
}

static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
	/* We always treat the FP bit as enabled from the host
	   perspective, so only need to adjust the shadow MSR */
	vcpu->arch.shadow_msr &= ~MSR_FP;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
#endif
}

/*
 * Simulate an AltiVec unavailable fault to load guest state
 * from the thread into the AltiVec unit.
 * It must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		if (!(current->thread.regs->msr & MSR_VEC)) {
			enable_kernel_altivec();
			load_vr_state(&vcpu->arch.vr);
			disable_kernel_altivec();
			current->thread.vr_save_area = &vcpu->arch.vr;
			current->thread.regs->msr |= MSR_VEC;
		}
	}
#endif
}

/*
 * Save guest vcpu AltiVec state into thread.
 * It must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		current->thread.vr_save_area = NULL;
	}
#endif
}

static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
{
	/* Synchronize guest's desire to get debug interrupts into shadow MSR */
#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr &= ~MSR_DE;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
#endif

	/* Force enable debug interrupts when user space wants to debug */
	if (vcpu->guest_debug) {
#ifdef CONFIG_KVM_BOOKE_HV
		/*
		 * Since there is no shadow MSR, sync MSR_DE into the guest
		 * visible MSR.
		 */
		vcpu->arch.shared->msr |= MSR_DE;
#else
		vcpu->arch.shadow_msr |= MSR_DE;
		vcpu->arch.shared->msr &= ~MSR_DE;
#endif
	}
}

/*
 * Helper function for "full" MSR writes.  No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
	kvmppc_vcpu_sync_fpu(vcpu);
	kvmppc_vcpu_sync_debug(vcpu);
}

static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
	trace_kvm_booke_queue_irqprio(vcpu, priority);
	set_bit(priority, &vcpu->arch.pending_exceptions);
}
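
/*
 * pending_exceptions is a bitmap indexed by BOOKE_IRQPRIO_*; delivery
 * scans it from the lowest set bit upwards (see
 * kvmppc_core_check_exceptions()), so lower priority numbers win.
 */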

void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
				 ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
				    ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
}

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
}

#ifdef CONFIG_ALTIVEC
void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
}
#endif

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
}

static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DEBUG);
}

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DEBUG, &vcpu->arch.pending_exceptions);
}

static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	kvmppc_set_srr0(vcpu, srr0);
	kvmppc_set_srr1(vcpu, srr1);
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
		vcpu->arch.dsrr0 = srr0;
		vcpu->arch.dsrr1 = srr1;
	} else {
		set_guest_csrr(vcpu, srr0, srr1);
	}
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}
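
/*
 * The helpers above mirror the BookE save/restore register pairs:
 * non-critical interrupts use SRR0/SRR1, critical ones CSRR0/CSRR1,
 * debug interrupts DSRR0/DSRR1 (falling back to CSRR on cores without
 * a separate debug level), and machine checks MCSRR0/MCSRR1.
 */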

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false, update_epr = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;
	ulong new_msr = vcpu->arch.shared->msr;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
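	/*
	 * shared->critical is part of the KVM paravirt (magic page)
	 * interface: the guest is expected to store its stack pointer
	 * there while inside a critical section, so a match with the
	 * current r1 in supervisor mode means delivery must be deferred.
	 */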

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
		update_epr = true;

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_ALIGNMENT:
		update_dear = true;
		fallthrough;
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		fallthrough;
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
#ifdef CONFIG_SPE_POSSIBLE
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
#endif
#ifdef CONFIG_ALTIVEC
	case BOOKE_IRQPRIO_ALTIVEC_UNAVAIL:
	case BOOKE_IRQPRIO_ALTIVEC_ASSIST:
#endif
	case BOOKE_IRQPRIO_AP_UNAVAIL:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_WATCHDOG:
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		fallthrough;
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
			int_class = INT_CLASS_DBG;
		else
			int_class = INT_CLASS_CRIT;

		break;
	}
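	/*
	 * msr_mask holds the MSR bits that stay enabled across the
	 * interrupt; everything else (EE, PR, ...) is cleared from
	 * new_msr below, mimicking real BookE interrupt entry.
	 */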

	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.regs.nip,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.regs.nip,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.regs.nip,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.regs.nip,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.regs.nip = vcpu->arch.ivpr |
					vcpu->arch.ivor[priority];
		if (update_esr == true)
			kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear == true)
			kvmppc_set_dar(vcpu, vcpu->arch.queued_dear);
		if (update_epr == true) {
			if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
				kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
			else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
				BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
				kvmppc_mpic_set_epr(vcpu);
			}
		}

		new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
		if (vcpu->arch.epcr & SPRN_EPCR_ICM)
			new_msr |= MSR_CM;
#endif
		kvmppc_set_msr(vcpu, new_msr);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}

/*
 * Return the number of jiffies until the next timeout.  If the timeout is
 * longer than the NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
 * because the larger value can break the timer APIs.
 */
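/*
 * Illustration: with TCR[WP] selecting period p, the watchdog fires when
 * timebase bit (1ULL << (63 - p)) next toggles from 0 to 1.  If that bit
 * is currently 1 it must first fall back to 0, costing a full extra
 * half-period; that is why wdt_ticks starts at wdt_tb in that case
 * before the distance to the next bit flip is added.
 */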
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
	u64 tb, wdt_tb, wdt_ticks = 0;
	u64 nr_jiffies = 0;
	u32 period = TCR_GET_WP(vcpu->arch.tcr);

	wdt_tb = 1ULL << (63 - period);
	tb = get_tb();
	/*
	 * The watchdog timeout will happen when the TB bit corresponding
	 * to the watchdog toggles from 0 to 1.
	 */
	if (tb & wdt_tb)
		wdt_ticks = wdt_tb;

	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));

	/* Convert timebase ticks to jiffies */
	nr_jiffies = wdt_ticks;

	if (do_div(nr_jiffies, tb_ticks_per_jiffy))
		nr_jiffies++;

	return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}

static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
	unsigned long nr_jiffies;
	unsigned long flags;

	/*
	 * If TSR_ENW and TSR_WIS are not set then no need to exit to
	 * userspace, so clear the KVM_REQ_WATCHDOG request.
	 */
	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
		kvm_clear_request(KVM_REQ_WATCHDOG, vcpu);

	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
	nr_jiffies = watchdog_next_timeout(vcpu);
	/*
	 * If the number of jiffies of watchdog timer >= NEXT_TIMER_MAX_DELTA
	 * then do not run the watchdog timer as this can break timer APIs.
	 */
	if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
		mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
	else
		del_timer(&vcpu->arch.wdt_timer);
	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}

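/*
 * Watchdog expiry follows the usual BookE TSR state machine: the first
 * expiry sets TSR[ENW], the second sets TSR[WIS] (raising the guest
 * watchdog interrupt), and a third, "final" expiry triggers whatever
 * action TCR[WRC] requests, which is handed to userspace below.
 */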
void kvmppc_watchdog_func(struct timer_list *t)
{
	struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.wdt_timer);
	u32 tsr, new_tsr;
	int final;

	do {
		new_tsr = tsr = vcpu->arch.tsr;
		final = 0;

		/* Time out event */
		if (tsr & TSR_ENW) {
			if (tsr & TSR_WIS)
				final = 1;
			else
				new_tsr = tsr | TSR_WIS;
		} else {
			new_tsr = tsr | TSR_ENW;
		}
	} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

	if (new_tsr & TSR_WIS) {
		smp_wmb();
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * If this is final watchdog expiry and some action is required
	 * then exit to userspace.
	 */
	if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
	    vcpu->arch.watchdog_enabled) {
		smp_wmb();
		kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * Stop running the watchdog timer after final expiration to
	 * prevent the host from being flooded with timers if the
	 * guest sets a short period.
	 * Timers will resume when TSR/TCR is updated next time.
	 */
	if (!final)
		arm_next_watchdog(vcpu);
}

static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);

	if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
		kvmppc_core_queue_watchdog(vcpu);
	else
		kvmppc_core_dequeue_watchdog(vcpu);
}
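
/*
 * A timer interrupt is queued only while both the enable bit in TCR
 * (DIE/WIE) and the status bit in TSR (DIS/WIS) are set, matching the
 * BookE decrementer and watchdog interrupt conditions.
 */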

static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
		                         BITS_PER_BYTE * sizeof(*pending),
		                         priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}

/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;
	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (kvm_request_pending(vcpu)) {
		/* Exception delivery raised request; start over */
		return 1;
	}

	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		hard_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
	}

	return r;
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
		update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_core_flush_tlb(vcpu);
#endif

	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
		r = 0;
	}

	if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
		vcpu->run->epr.epr = 0;
		vcpu->arch.epr_needed = true;
		vcpu->run->exit_reason = KVM_EXIT_EPR;
		r = 0;
	}

	return r;
}

int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
{
	int ret, s;
	struct debug_reg debug;

	if (!vcpu->arch.sane) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		ret = s;
		goto out;
	}
	/* interrupts now hard-disabled */

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	enable_kernel_fp();

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU.
	 */
	kvmppc_load_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
	/* Save userspace AltiVec state in stack */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		enable_kernel_altivec();
	/*
	 * Since we can't trap on MSR_VEC in GS-mode, we consider the guest
	 * as always using AltiVec.
	 */
	kvmppc_load_guest_altivec(vcpu);
#endif

	/* Switch to guest debug context */
	debug = vcpu->arch.dbg_reg;
	switch_booke_debug_regs(&debug);
	debug = current->thread.debug;
	current->thread.debug = vcpu->arch.dbg_reg;

	vcpu->arch.pgdir = vcpu->kvm->mm->pgd;
	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(vcpu);

	/* No need for guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Switch back to user space debug context */
	switch_booke_debug_regs(&debug);
	current->thread.debug = debug;

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
	kvmppc_save_guest_altivec(vcpu);
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

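/*
 * Map the emulator's verdict onto a resume action: EMULATE_DONE returns
 * to the guest (RESUME_GUEST_NV also reloads non-volatile registers),
 * EMULATE_AGAIN simply re-enters the guest, and EMULATE_FAIL punts to
 * userspace with the failing instruction encoded for debugging.
 */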
static int emulation_exit(struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_AGAIN:
		return RESUME_GUEST;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
		vcpu->run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	case EMULATE_EXIT_USER:
		return RESUME_HOST;

	default:
		BUG();
	}
}

static int kvmppc_handle_debug(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg);
	u32 dbsr = vcpu->arch.dbsr;

	if (vcpu->guest_debug == 0) {
		/*
		 * Debug resources belong to the guest.
		 * Imprecise debug events are not injected.
		 */
		if (dbsr & DBSR_IDE) {
			dbsr &= ~DBSR_IDE;
			if (!dbsr)
				return RESUME_GUEST;
		}

		if (dbsr && (vcpu->arch.shared->msr & MSR_DE) &&
			    (vcpu->arch.dbg_reg.dbcr0 & DBCR0_IDM))
			kvmppc_core_queue_debug(vcpu);

		/* Inject a program interrupt if trap debug is not allowed */
		if ((dbsr & DBSR_TIE) && !(vcpu->arch.shared->msr & MSR_DE))
			kvmppc_core_queue_program(vcpu, ESR_PTR);

		return RESUME_GUEST;
	}

	/*
	 * Debug resource owned by userspace.
	 * Clear guest dbsr (vcpu->arch.dbsr)
	 */
	vcpu->arch.dbsr = 0;
	run->debug.arch.status = 0;
	run->debug.arch.address = vcpu->arch.regs.nip;

	if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
		run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
	} else {
		if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
		else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
		if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
			run->debug.arch.address = dbg_reg->dac1;
		else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
			run->debug.arch.address = dbg_reg->dac2;
	}

	return RESUME_HOST;
}

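/*
 * Snapshot just enough host register state to fake a pt_regs for the
 * host interrupt handlers invoked by kvmppc_restart_interrupt().  The
 * "bl 1f; 1: mflr" sequence captures the current instruction address.
 */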
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
	ulong r1, ip, msr, lr;

	asm("mr %0, 1" : "=r"(r1));
	asm("mflr %0" : "=r"(lr));
	asm("mfmsr %0" : "=r"(msr));
	asm("bl 1f; 1: mflr %0" : "=r"(ip));

	memset(regs, 0, sizeof(*regs));
	regs->gpr[1] = r1;
	regs->nip = ip;
	regs->msr = msr;
	regs->link = lr;
}

/*
 * For interrupts that need to be handled by host interrupt handlers,
 * the corresponding host handlers are called from here in a similar way
 * (though not exactly) as they are called from the low level handlers
 * (such as from arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
				     unsigned int exit_nr)
{
	struct pt_regs regs;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_fill_pt_regs(&regs);
		do_IRQ(&regs);
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_fill_pt_regs(&regs);
		timer_interrupt(&regs);
		break;
#if defined(CONFIG_PPC_DOORBELL)
	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_fill_pt_regs(&regs);
		doorbell_exception(&regs);
		break;
#endif
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		/* FIXME */
		break;
	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		kvmppc_fill_pt_regs(&regs);
		performance_monitor_exception(&regs);
		break;
	case BOOKE_INTERRUPT_WATCHDOG:
		kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
		WatchdogException(&regs);
#else
		unknown_exception(&regs);
#endif
		break;
	case BOOKE_INTERRUPT_CRITICAL:
		kvmppc_fill_pt_regs(&regs);
		unknown_exception(&regs);
		break;
	case BOOKE_INTERRUPT_DEBUG:
		/* Save DBSR before preemption is enabled */
		vcpu->arch.dbsr = mfspr(SPRN_DBSR);
		kvmppc_clear_dbsr();
		break;
	}
}

static int kvmppc_resume_inst_load(struct kvm_vcpu *vcpu,
				  enum emulation_result emulated, u32 last_inst)
{
	switch (emulated) {
	case EMULATE_AGAIN:
		return RESUME_GUEST;

	case EMULATE_FAIL:
		pr_debug("%s: load instruction from guest address %lx failed\n",
		       __func__, vcpu->arch.regs.nip);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
		vcpu->run->hw.hardware_exit_reason |= last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	default:
		BUG();
	}
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	struct kvm_run *run = vcpu->run;
	int r = RESUME_HOST;
	int s;
	int idx;
	u32 last_inst = KVM_INST_FETCH_FAILED;
	enum emulation_result emulated = EMULATE_DONE;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* restart interrupts if they were meant for the host */
	kvmppc_restart_interrupt(vcpu, exit_nr);

	/*
	 * get last instruction before being preempted
	 * TODO: for e6500 check also BOOKE_INTERRUPT_LRAT_ERROR & ESR_DATA
	 */
	switch (exit_nr) {
	case BOOKE_INTERRUPT_DATA_STORAGE:
	case BOOKE_INTERRUPT_DTLB_MISS:
	case BOOKE_INTERRUPT_HV_PRIV:
		emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		break;
	case BOOKE_INTERRUPT_PROGRAM:
		/* SW breakpoints arrive as illegal instructions on HV */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		break;
	default:
		break;
	}

	trace_kvm_exit(exit_nr, vcpu);

	context_tracking_guest_exit();
	if (!vtime_accounting_enabled_this_cpu()) {
		local_irq_enable();
		/*
		 * Service IRQs here before vtime_account_guest_exit() so any
		 * ticks that occurred while running the guest are accounted to
		 * the guest. If vtime accounting is enabled, accounting uses
		 * TB rather than ticks, so it can be done without enabling
		 * interrupts here, which has the problem that it accounts
		 * interrupt processing overhead to the host.
		 */
		local_irq_disable();
	}
	vtime_account_guest_exit();

	local_irq_enable();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	if (emulated != EMULATE_DONE) {
		r = kvmppc_resume_inst_load(vcpu, emulated, last_inst);
		goto out;
	}

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		/* For debugging, send invalid exit reason to user space */
		run->hw.hardware_exit_reason = ~1ULL << 32;
		run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_account_exit(vcpu, DEC_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_WATCHDOG:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_account_exit(vcpu, DBELL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_CE or MSR_ME was not
		 * set.  Once we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_EE was not set.  Once
		 * we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_HV_PRIV:
		r = emulation_exit(vcpu);
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if ((vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) &&
			(last_inst == KVMPPC_INST_SW_BREAKPOINT)) {
			/*
			 * We are here because of an SW breakpoint instr,
			 * so let's return to the host to handle it.
			 */
			r = kvmppc_handle_debug(vcpu);
			run->exit_reason = KVM_EXIT_DEBUG;
			kvmppc_account_exit(vcpu, DEBUG_EXITS);
			break;
		}

		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
			/*
			 * Program traps generated by user-level software must
			 * be handled by the guest kernel.
			 *
			 * In GS mode, hypervisor privileged instructions trap
			 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
			 * actual program interrupts, handled by the guest.
			 */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		r = emulation_exit(vcpu);
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#elif defined(CONFIG_SPE_POSSIBLE)
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it.  Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.regs.nip);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif /* CONFIG_SPE_POSSIBLE */

/*
 * On cores with Vector category, KVM is loaded only if CONFIG_ALTIVEC,
 * see kvmppc_core_check_processor_compat().
 */
#ifdef CONFIG_ALTIVEC
	case BOOKE_INTERRUPT_ALTIVEC_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_ALTIVEC_ASSIST:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_ASSIST);
		r = RESUME_GUEST;
1197*4882a593Smuzhiyun 		break;
1198*4882a593Smuzhiyun #endif
1199*4882a593Smuzhiyun 
1200*4882a593Smuzhiyun 	case BOOKE_INTERRUPT_DATA_STORAGE:
1201*4882a593Smuzhiyun 		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
1202*4882a593Smuzhiyun 		                               vcpu->arch.fault_esr);
1203*4882a593Smuzhiyun 		kvmppc_account_exit(vcpu, DSI_EXITS);
1204*4882a593Smuzhiyun 		r = RESUME_GUEST;
1205*4882a593Smuzhiyun 		break;
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun 	case BOOKE_INTERRUPT_INST_STORAGE:
1208*4882a593Smuzhiyun 		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
1209*4882a593Smuzhiyun 		kvmppc_account_exit(vcpu, ISI_EXITS);
1210*4882a593Smuzhiyun 		r = RESUME_GUEST;
1211*4882a593Smuzhiyun 		break;
1212*4882a593Smuzhiyun 
1213*4882a593Smuzhiyun 	case BOOKE_INTERRUPT_ALIGNMENT:
1214*4882a593Smuzhiyun 		kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
1215*4882a593Smuzhiyun 		                            vcpu->arch.fault_esr);
1216*4882a593Smuzhiyun 		r = RESUME_GUEST;
1217*4882a593Smuzhiyun 		break;
1218*4882a593Smuzhiyun 
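	/*
	 * System-call exits.  With CONFIG_KVM_BOOKE_HV, guest syscalls stay
	 * in the guest and only hypercalls arrive here, as
	 * BOOKE_INTERRUPT_HV_SYSCALL.  Without it, every guest "sc" traps:
	 * KVM PV hypercalls are recognized by the magic value in r0, and
	 * everything else is reflected back to the guest as a syscall.
	 */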
1219*4882a593Smuzhiyun #ifdef CONFIG_KVM_BOOKE_HV
1220*4882a593Smuzhiyun 	case BOOKE_INTERRUPT_HV_SYSCALL:
1221*4882a593Smuzhiyun 		if (!(vcpu->arch.shared->msr & MSR_PR)) {
1222*4882a593Smuzhiyun 			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
1223*4882a593Smuzhiyun 		} else {
1224*4882a593Smuzhiyun 			/*
1225*4882a593Smuzhiyun 			 * hcall from guest userspace -- send privileged
1226*4882a593Smuzhiyun 			 * instruction program check.
1227*4882a593Smuzhiyun 			 */
1228*4882a593Smuzhiyun 			kvmppc_core_queue_program(vcpu, ESR_PPR);
1229*4882a593Smuzhiyun 		}
1230*4882a593Smuzhiyun 
1231*4882a593Smuzhiyun 		r = RESUME_GUEST;
1232*4882a593Smuzhiyun 		break;
1233*4882a593Smuzhiyun #else
1234*4882a593Smuzhiyun 	case BOOKE_INTERRUPT_SYSCALL:
1235*4882a593Smuzhiyun 		if (!(vcpu->arch.shared->msr & MSR_PR) &&
1236*4882a593Smuzhiyun 		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
1237*4882a593Smuzhiyun 			/* KVM PV hypercalls */
1238*4882a593Smuzhiyun 			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
1239*4882a593Smuzhiyun 			r = RESUME_GUEST;
1240*4882a593Smuzhiyun 		} else {
1241*4882a593Smuzhiyun 			/* Guest syscalls */
1242*4882a593Smuzhiyun 			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
1243*4882a593Smuzhiyun 		}
1244*4882a593Smuzhiyun 		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
1245*4882a593Smuzhiyun 		r = RESUME_GUEST;
1246*4882a593Smuzhiyun 		break;
1247*4882a593Smuzhiyun #endif
1248*4882a593Smuzhiyun 
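	/*
	 * Guest data TLB miss.  Three outcomes below: reflect the miss to
	 * the guest (no guest TLB entry), fill the shadow TLB (the entry
	 * resolves to visible RAM), or emulate MMIO (it does not).
	 */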
1249*4882a593Smuzhiyun 	case BOOKE_INTERRUPT_DTLB_MISS: {
1250*4882a593Smuzhiyun 		unsigned long eaddr = vcpu->arch.fault_dear;
1251*4882a593Smuzhiyun 		int gtlb_index;
1252*4882a593Smuzhiyun 		gpa_t gpaddr;
1253*4882a593Smuzhiyun 		gfn_t gfn;
1254*4882a593Smuzhiyun 
1255*4882a593Smuzhiyun #ifdef CONFIG_KVM_E500V2
1256*4882a593Smuzhiyun 		if (!(vcpu->arch.shared->msr & MSR_PR) &&
1257*4882a593Smuzhiyun 		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
1258*4882a593Smuzhiyun 			kvmppc_map_magic(vcpu);
1259*4882a593Smuzhiyun 			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
1260*4882a593Smuzhiyun 			r = RESUME_GUEST;
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun 			break;
1263*4882a593Smuzhiyun 		}
1264*4882a593Smuzhiyun #endif
1265*4882a593Smuzhiyun 
1266*4882a593Smuzhiyun 		/* Check the guest TLB. */
1267*4882a593Smuzhiyun 		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
1268*4882a593Smuzhiyun 		if (gtlb_index < 0) {
1269*4882a593Smuzhiyun 			/* The guest didn't have a mapping for it. */
1270*4882a593Smuzhiyun 			kvmppc_core_queue_dtlb_miss(vcpu,
1271*4882a593Smuzhiyun 			                            vcpu->arch.fault_dear,
1272*4882a593Smuzhiyun 			                            vcpu->arch.fault_esr);
1273*4882a593Smuzhiyun 			kvmppc_mmu_dtlb_miss(vcpu);
1274*4882a593Smuzhiyun 			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
1275*4882a593Smuzhiyun 			r = RESUME_GUEST;
1276*4882a593Smuzhiyun 			break;
1277*4882a593Smuzhiyun 		}
1278*4882a593Smuzhiyun 
1279*4882a593Smuzhiyun 		idx = srcu_read_lock(&vcpu->kvm->srcu);
1280*4882a593Smuzhiyun 
1281*4882a593Smuzhiyun 		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
1282*4882a593Smuzhiyun 		gfn = gpaddr >> PAGE_SHIFT;
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun 		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
1285*4882a593Smuzhiyun 			/* The guest TLB had a mapping, but the shadow TLB
1286*4882a593Smuzhiyun 			 * didn't, and it is RAM. This could be because:
1287*4882a593Smuzhiyun 			 * a) the entry is mapping the host kernel, or
1288*4882a593Smuzhiyun 			 * b) the guest used a large mapping which we're faking.
1289*4882a593Smuzhiyun 			 * Either way, we need to satisfy the fault without
1290*4882a593Smuzhiyun 			 * invoking the guest. */
1291*4882a593Smuzhiyun 			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
1292*4882a593Smuzhiyun 			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
1293*4882a593Smuzhiyun 			r = RESUME_GUEST;
1294*4882a593Smuzhiyun 		} else {
1295*4882a593Smuzhiyun 			/* Guest has mapped and accessed a page which is not
1296*4882a593Smuzhiyun 			 * actually RAM. */
1297*4882a593Smuzhiyun 			vcpu->arch.paddr_accessed = gpaddr;
1298*4882a593Smuzhiyun 			vcpu->arch.vaddr_accessed = eaddr;
1299*4882a593Smuzhiyun 			r = kvmppc_emulate_mmio(vcpu);
1300*4882a593Smuzhiyun 			kvmppc_account_exit(vcpu, MMIO_EXITS);
1301*4882a593Smuzhiyun 		}
1302*4882a593Smuzhiyun 
1303*4882a593Smuzhiyun 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
1304*4882a593Smuzhiyun 		break;
1305*4882a593Smuzhiyun 	}
1306*4882a593Smuzhiyun 
1307*4882a593Smuzhiyun 	case BOOKE_INTERRUPT_ITLB_MISS: {
1308*4882a593Smuzhiyun 		unsigned long eaddr = vcpu->arch.regs.nip;
1309*4882a593Smuzhiyun 		gpa_t gpaddr;
1310*4882a593Smuzhiyun 		gfn_t gfn;
1311*4882a593Smuzhiyun 		int gtlb_index;
1312*4882a593Smuzhiyun 
1313*4882a593Smuzhiyun 		r = RESUME_GUEST;
1314*4882a593Smuzhiyun 
1315*4882a593Smuzhiyun 		/* Check the guest TLB. */
1316*4882a593Smuzhiyun 		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
1317*4882a593Smuzhiyun 		if (gtlb_index < 0) {
1318*4882a593Smuzhiyun 			/* The guest didn't have a mapping for it. */
1319*4882a593Smuzhiyun 			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
1320*4882a593Smuzhiyun 			kvmppc_mmu_itlb_miss(vcpu);
1321*4882a593Smuzhiyun 			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
1322*4882a593Smuzhiyun 			break;
1323*4882a593Smuzhiyun 		}
1324*4882a593Smuzhiyun 
1325*4882a593Smuzhiyun 		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
1326*4882a593Smuzhiyun 
1327*4882a593Smuzhiyun 		idx = srcu_read_lock(&vcpu->kvm->srcu);
1328*4882a593Smuzhiyun 
1329*4882a593Smuzhiyun 		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
1330*4882a593Smuzhiyun 		gfn = gpaddr >> PAGE_SHIFT;
1331*4882a593Smuzhiyun 
1332*4882a593Smuzhiyun 		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
1333*4882a593Smuzhiyun 			/* The guest TLB had a mapping, but the shadow TLB
1334*4882a593Smuzhiyun 			 * didn't. This could be because:
1335*4882a593Smuzhiyun 			 * a) the entry is mapping the host kernel, or
1336*4882a593Smuzhiyun 			 * b) the guest used a large mapping which we're faking.
1337*4882a593Smuzhiyun 			 * Either way, we need to satisfy the fault without
1338*4882a593Smuzhiyun 			 * invoking the guest. */
1339*4882a593Smuzhiyun 			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
1340*4882a593Smuzhiyun 		} else {
1341*4882a593Smuzhiyun 			/* The guest mapped and jumped to non-RAM! */
1342*4882a593Smuzhiyun 			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
1343*4882a593Smuzhiyun 		}
1344*4882a593Smuzhiyun 
1345*4882a593Smuzhiyun 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
1346*4882a593Smuzhiyun 		break;
1347*4882a593Smuzhiyun 	}
1348*4882a593Smuzhiyun 
1349*4882a593Smuzhiyun 	case BOOKE_INTERRUPT_DEBUG: {
1350*4882a593Smuzhiyun 		r = kvmppc_handle_debug(vcpu);
1351*4882a593Smuzhiyun 		if (r == RESUME_HOST)
1352*4882a593Smuzhiyun 			run->exit_reason = KVM_EXIT_DEBUG;
1353*4882a593Smuzhiyun 		kvmppc_account_exit(vcpu, DEBUG_EXITS);
1354*4882a593Smuzhiyun 		break;
1355*4882a593Smuzhiyun 	}
1356*4882a593Smuzhiyun 
1357*4882a593Smuzhiyun 	default:
1358*4882a593Smuzhiyun 		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
1359*4882a593Smuzhiyun 		BUG();
1360*4882a593Smuzhiyun 	}
1361*4882a593Smuzhiyun 
1362*4882a593Smuzhiyun out:
1363*4882a593Smuzhiyun 	/*
1364*4882a593Smuzhiyun 	 * To avoid clobbering exit_reason, only check for signals if we
1365*4882a593Smuzhiyun 	 * aren't already exiting to userspace for some other reason.
1366*4882a593Smuzhiyun 	 */
1367*4882a593Smuzhiyun 	if (!(r & RESUME_HOST)) {
1368*4882a593Smuzhiyun 		s = kvmppc_prepare_to_enter(vcpu);
1369*4882a593Smuzhiyun 		if (s <= 0)
1370*4882a593Smuzhiyun 			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
1371*4882a593Smuzhiyun 		else {
1372*4882a593Smuzhiyun 			/* interrupts now hard-disabled */
1373*4882a593Smuzhiyun 			kvmppc_fix_ee_before_entry();
1374*4882a593Smuzhiyun 			kvmppc_load_guest_fp(vcpu);
1375*4882a593Smuzhiyun 			kvmppc_load_guest_altivec(vcpu);
1376*4882a593Smuzhiyun 		}
1377*4882a593Smuzhiyun 	}
1378*4882a593Smuzhiyun 
1379*4882a593Smuzhiyun 	return r;
1380*4882a593Smuzhiyun }
1381*4882a593Smuzhiyun 
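/*
 * Replace the whole TSR.  If the watchdog bits (TSR_ENW/TSR_WIS) changed,
 * the next watchdog expiry must be recomputed before timer interrupts are
 * re-evaluated.
 */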
1382*4882a593Smuzhiyun static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
1383*4882a593Smuzhiyun {
1384*4882a593Smuzhiyun 	u32 old_tsr = vcpu->arch.tsr;
1385*4882a593Smuzhiyun 
1386*4882a593Smuzhiyun 	vcpu->arch.tsr = new_tsr;
1387*4882a593Smuzhiyun 
1388*4882a593Smuzhiyun 	if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
1389*4882a593Smuzhiyun 		arm_next_watchdog(vcpu);
1390*4882a593Smuzhiyun 
1391*4882a593Smuzhiyun 	update_timer_ints(vcpu);
1392*4882a593Smuzhiyun }
1393*4882a593Smuzhiyun 
1394*4882a593Smuzhiyun int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
1395*4882a593Smuzhiyun {
1396*4882a593Smuzhiyun 	/* setup watchdog timer once */
1397*4882a593Smuzhiyun 	spin_lock_init(&vcpu->arch.wdt_lock);
1398*4882a593Smuzhiyun 	timer_setup(&vcpu->arch.wdt_timer, kvmppc_watchdog_func, 0);
1399*4882a593Smuzhiyun 
1400*4882a593Smuzhiyun 	/*
1401*4882a593Smuzhiyun 	 * Clear DBSR.MRR to avoid a spurious guest debug interrupt;
1402*4882a593Smuzhiyun 	 * the MRR reset-status field is of interest only to the host.
1403*4882a593Smuzhiyun 	 */
1404*4882a593Smuzhiyun 	mtspr(SPRN_DBSR, DBSR_MRR);
1405*4882a593Smuzhiyun 	return 0;
1406*4882a593Smuzhiyun }
1407*4882a593Smuzhiyun 
1408*4882a593Smuzhiyun void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
1409*4882a593Smuzhiyun {
1410*4882a593Smuzhiyun 	del_timer_sync(&vcpu->arch.wdt_timer);
1411*4882a593Smuzhiyun }
1412*4882a593Smuzhiyun 
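/*
 * Backends for the KVM_GET_REGS/KVM_SET_REGS vcpu ioctls: the GPRs,
 * PC/CTR/LR/XER/CR, MSR, SRR0/1, PID and the SPRG scratch registers are
 * copied as one snapshot while the vcpu is loaded.  A minimal userspace
 * sketch (vcpu_fd is assumed to be an open KVM vcpu descriptor):
 *
 *	struct kvm_regs regs;
 *
 *	if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) == 0)
 *		printf("guest pc = 0x%llx\n", (unsigned long long)regs.pc);
 */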
1413*4882a593Smuzhiyun int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1414*4882a593Smuzhiyun {
1415*4882a593Smuzhiyun 	int i;
1416*4882a593Smuzhiyun 
1417*4882a593Smuzhiyun 	vcpu_load(vcpu);
1418*4882a593Smuzhiyun 
1419*4882a593Smuzhiyun 	regs->pc = vcpu->arch.regs.nip;
1420*4882a593Smuzhiyun 	regs->cr = kvmppc_get_cr(vcpu);
1421*4882a593Smuzhiyun 	regs->ctr = vcpu->arch.regs.ctr;
1422*4882a593Smuzhiyun 	regs->lr = vcpu->arch.regs.link;
1423*4882a593Smuzhiyun 	regs->xer = kvmppc_get_xer(vcpu);
1424*4882a593Smuzhiyun 	regs->msr = vcpu->arch.shared->msr;
1425*4882a593Smuzhiyun 	regs->srr0 = kvmppc_get_srr0(vcpu);
1426*4882a593Smuzhiyun 	regs->srr1 = kvmppc_get_srr1(vcpu);
1427*4882a593Smuzhiyun 	regs->pid = vcpu->arch.pid;
1428*4882a593Smuzhiyun 	regs->sprg0 = kvmppc_get_sprg0(vcpu);
1429*4882a593Smuzhiyun 	regs->sprg1 = kvmppc_get_sprg1(vcpu);
1430*4882a593Smuzhiyun 	regs->sprg2 = kvmppc_get_sprg2(vcpu);
1431*4882a593Smuzhiyun 	regs->sprg3 = kvmppc_get_sprg3(vcpu);
1432*4882a593Smuzhiyun 	regs->sprg4 = kvmppc_get_sprg4(vcpu);
1433*4882a593Smuzhiyun 	regs->sprg5 = kvmppc_get_sprg5(vcpu);
1434*4882a593Smuzhiyun 	regs->sprg6 = kvmppc_get_sprg6(vcpu);
1435*4882a593Smuzhiyun 	regs->sprg7 = kvmppc_get_sprg7(vcpu);
1436*4882a593Smuzhiyun 
1437*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
1438*4882a593Smuzhiyun 		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
1439*4882a593Smuzhiyun 
1440*4882a593Smuzhiyun 	vcpu_put(vcpu);
1441*4882a593Smuzhiyun 	return 0;
1442*4882a593Smuzhiyun }
1443*4882a593Smuzhiyun 
1444*4882a593Smuzhiyun int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1445*4882a593Smuzhiyun {
1446*4882a593Smuzhiyun 	int i;
1447*4882a593Smuzhiyun 
1448*4882a593Smuzhiyun 	vcpu_load(vcpu);
1449*4882a593Smuzhiyun 
1450*4882a593Smuzhiyun 	vcpu->arch.regs.nip = regs->pc;
1451*4882a593Smuzhiyun 	kvmppc_set_cr(vcpu, regs->cr);
1452*4882a593Smuzhiyun 	vcpu->arch.regs.ctr = regs->ctr;
1453*4882a593Smuzhiyun 	vcpu->arch.regs.link = regs->lr;
1454*4882a593Smuzhiyun 	kvmppc_set_xer(vcpu, regs->xer);
1455*4882a593Smuzhiyun 	kvmppc_set_msr(vcpu, regs->msr);
1456*4882a593Smuzhiyun 	kvmppc_set_srr0(vcpu, regs->srr0);
1457*4882a593Smuzhiyun 	kvmppc_set_srr1(vcpu, regs->srr1);
1458*4882a593Smuzhiyun 	kvmppc_set_pid(vcpu, regs->pid);
1459*4882a593Smuzhiyun 	kvmppc_set_sprg0(vcpu, regs->sprg0);
1460*4882a593Smuzhiyun 	kvmppc_set_sprg1(vcpu, regs->sprg1);
1461*4882a593Smuzhiyun 	kvmppc_set_sprg2(vcpu, regs->sprg2);
1462*4882a593Smuzhiyun 	kvmppc_set_sprg3(vcpu, regs->sprg3);
1463*4882a593Smuzhiyun 	kvmppc_set_sprg4(vcpu, regs->sprg4);
1464*4882a593Smuzhiyun 	kvmppc_set_sprg5(vcpu, regs->sprg5);
1465*4882a593Smuzhiyun 	kvmppc_set_sprg6(vcpu, regs->sprg6);
1466*4882a593Smuzhiyun 	kvmppc_set_sprg7(vcpu, regs->sprg7);
1467*4882a593Smuzhiyun 
1468*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
1469*4882a593Smuzhiyun 		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
1470*4882a593Smuzhiyun 
1471*4882a593Smuzhiyun 	vcpu_put(vcpu);
1472*4882a593Smuzhiyun 	return 0;
1473*4882a593Smuzhiyun }
1474*4882a593Smuzhiyun 
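/*
 * The sregs interface is feature-sliced: each get_/set_ helper below
 * advertises (or checks) one KVM_SREGS_E_* feature bit, so userspace can
 * pass a kvm_sregs that only touches the blocks it cares about.
 */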
1475*4882a593Smuzhiyun static void get_sregs_base(struct kvm_vcpu *vcpu,
1476*4882a593Smuzhiyun                            struct kvm_sregs *sregs)
1477*4882a593Smuzhiyun {
1478*4882a593Smuzhiyun 	u64 tb = get_tb();
1479*4882a593Smuzhiyun 
1480*4882a593Smuzhiyun 	sregs->u.e.features |= KVM_SREGS_E_BASE;
1481*4882a593Smuzhiyun 
1482*4882a593Smuzhiyun 	sregs->u.e.csrr0 = vcpu->arch.csrr0;
1483*4882a593Smuzhiyun 	sregs->u.e.csrr1 = vcpu->arch.csrr1;
1484*4882a593Smuzhiyun 	sregs->u.e.mcsr = vcpu->arch.mcsr;
1485*4882a593Smuzhiyun 	sregs->u.e.esr = kvmppc_get_esr(vcpu);
1486*4882a593Smuzhiyun 	sregs->u.e.dear = kvmppc_get_dar(vcpu);
1487*4882a593Smuzhiyun 	sregs->u.e.tsr = vcpu->arch.tsr;
1488*4882a593Smuzhiyun 	sregs->u.e.tcr = vcpu->arch.tcr;
1489*4882a593Smuzhiyun 	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
1490*4882a593Smuzhiyun 	sregs->u.e.tb = tb;
1491*4882a593Smuzhiyun 	sregs->u.e.vrsave = vcpu->arch.vrsave;
1492*4882a593Smuzhiyun }
1493*4882a593Smuzhiyun 
1494*4882a593Smuzhiyun static int set_sregs_base(struct kvm_vcpu *vcpu,
1495*4882a593Smuzhiyun                           struct kvm_sregs *sregs)
1496*4882a593Smuzhiyun {
1497*4882a593Smuzhiyun 	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
1498*4882a593Smuzhiyun 		return 0;
1499*4882a593Smuzhiyun 
1500*4882a593Smuzhiyun 	vcpu->arch.csrr0 = sregs->u.e.csrr0;
1501*4882a593Smuzhiyun 	vcpu->arch.csrr1 = sregs->u.e.csrr1;
1502*4882a593Smuzhiyun 	vcpu->arch.mcsr = sregs->u.e.mcsr;
1503*4882a593Smuzhiyun 	kvmppc_set_esr(vcpu, sregs->u.e.esr);
1504*4882a593Smuzhiyun 	kvmppc_set_dar(vcpu, sregs->u.e.dear);
1505*4882a593Smuzhiyun 	vcpu->arch.vrsave = sregs->u.e.vrsave;
1506*4882a593Smuzhiyun 	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);
1507*4882a593Smuzhiyun 
1508*4882a593Smuzhiyun 	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
1509*4882a593Smuzhiyun 		vcpu->arch.dec = sregs->u.e.dec;
1510*4882a593Smuzhiyun 		kvmppc_emulate_dec(vcpu);
1511*4882a593Smuzhiyun 	}
1512*4882a593Smuzhiyun 
1513*4882a593Smuzhiyun 	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
1514*4882a593Smuzhiyun 		kvmppc_set_tsr(vcpu, sregs->u.e.tsr);
1515*4882a593Smuzhiyun 
1516*4882a593Smuzhiyun 	return 0;
1517*4882a593Smuzhiyun }
1518*4882a593Smuzhiyun 
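/*
 * Arch 2.06 block.  PIR is reported from the vcpu id and is effectively
 * read-only: set_sregs_arch206() rejects any attempt to change it.
 */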
1519*4882a593Smuzhiyun static void get_sregs_arch206(struct kvm_vcpu *vcpu,
1520*4882a593Smuzhiyun                               struct kvm_sregs *sregs)
1521*4882a593Smuzhiyun {
1522*4882a593Smuzhiyun 	sregs->u.e.features |= KVM_SREGS_E_ARCH206;
1523*4882a593Smuzhiyun 
1524*4882a593Smuzhiyun 	sregs->u.e.pir = vcpu->vcpu_id;
1525*4882a593Smuzhiyun 	sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
1526*4882a593Smuzhiyun 	sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
1527*4882a593Smuzhiyun 	sregs->u.e.decar = vcpu->arch.decar;
1528*4882a593Smuzhiyun 	sregs->u.e.ivpr = vcpu->arch.ivpr;
1529*4882a593Smuzhiyun }
1530*4882a593Smuzhiyun 
1531*4882a593Smuzhiyun static int set_sregs_arch206(struct kvm_vcpu *vcpu,
1532*4882a593Smuzhiyun                              struct kvm_sregs *sregs)
1533*4882a593Smuzhiyun {
1534*4882a593Smuzhiyun 	if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
1535*4882a593Smuzhiyun 		return 0;
1536*4882a593Smuzhiyun 
1537*4882a593Smuzhiyun 	if (sregs->u.e.pir != vcpu->vcpu_id)
1538*4882a593Smuzhiyun 		return -EINVAL;
1539*4882a593Smuzhiyun 
1540*4882a593Smuzhiyun 	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
1541*4882a593Smuzhiyun 	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
1542*4882a593Smuzhiyun 	vcpu->arch.decar = sregs->u.e.decar;
1543*4882a593Smuzhiyun 	vcpu->arch.ivpr = sregs->u.e.ivpr;
1544*4882a593Smuzhiyun 
1545*4882a593Smuzhiyun 	return 0;
1546*4882a593Smuzhiyun }
1547*4882a593Smuzhiyun 
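/*
 * IVOR block: ivor_low[0..15] corresponds one-to-one to IVOR0..IVOR15,
 * indexed here by the matching BOOKE_IRQPRIO_* priority.
 */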
1548*4882a593Smuzhiyun int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1549*4882a593Smuzhiyun {
1550*4882a593Smuzhiyun 	sregs->u.e.features |= KVM_SREGS_E_IVOR;
1551*4882a593Smuzhiyun 
1552*4882a593Smuzhiyun 	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
1553*4882a593Smuzhiyun 	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
1554*4882a593Smuzhiyun 	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
1555*4882a593Smuzhiyun 	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
1556*4882a593Smuzhiyun 	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
1557*4882a593Smuzhiyun 	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
1558*4882a593Smuzhiyun 	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
1559*4882a593Smuzhiyun 	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
1560*4882a593Smuzhiyun 	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
1561*4882a593Smuzhiyun 	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
1562*4882a593Smuzhiyun 	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
1563*4882a593Smuzhiyun 	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
1564*4882a593Smuzhiyun 	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
1565*4882a593Smuzhiyun 	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
1566*4882a593Smuzhiyun 	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
1567*4882a593Smuzhiyun 	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
1568*4882a593Smuzhiyun 	return 0;
1569*4882a593Smuzhiyun }
1570*4882a593Smuzhiyun 
1571*4882a593Smuzhiyun int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1572*4882a593Smuzhiyun {
1573*4882a593Smuzhiyun 	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
1574*4882a593Smuzhiyun 		return 0;
1575*4882a593Smuzhiyun 
1576*4882a593Smuzhiyun 	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
1577*4882a593Smuzhiyun 	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
1578*4882a593Smuzhiyun 	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
1579*4882a593Smuzhiyun 	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
1580*4882a593Smuzhiyun 	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
1581*4882a593Smuzhiyun 	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
1582*4882a593Smuzhiyun 	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
1583*4882a593Smuzhiyun 	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
1584*4882a593Smuzhiyun 	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
1585*4882a593Smuzhiyun 	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
1586*4882a593Smuzhiyun 	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
1587*4882a593Smuzhiyun 	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
1588*4882a593Smuzhiyun 	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
1589*4882a593Smuzhiyun 	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
1590*4882a593Smuzhiyun 	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
1591*4882a593Smuzhiyun 	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];
1592*4882a593Smuzhiyun 
1593*4882a593Smuzhiyun 	return 0;
1594*4882a593Smuzhiyun }
1595*4882a593Smuzhiyun 
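/*
 * Top-level sregs ioctls: handle the generic base and arch 2.06 blocks
 * here, then delegate core-specific state through kvm_ops.
 */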
1596*4882a593Smuzhiyun int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1597*4882a593Smuzhiyun                                   struct kvm_sregs *sregs)
1598*4882a593Smuzhiyun {
1599*4882a593Smuzhiyun 	int ret;
1600*4882a593Smuzhiyun 
1601*4882a593Smuzhiyun 	vcpu_load(vcpu);
1602*4882a593Smuzhiyun 
1603*4882a593Smuzhiyun 	sregs->pvr = vcpu->arch.pvr;
1604*4882a593Smuzhiyun 
1605*4882a593Smuzhiyun 	get_sregs_base(vcpu, sregs);
1606*4882a593Smuzhiyun 	get_sregs_arch206(vcpu, sregs);
1607*4882a593Smuzhiyun 	ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
1608*4882a593Smuzhiyun 
1609*4882a593Smuzhiyun 	vcpu_put(vcpu);
1610*4882a593Smuzhiyun 	return ret;
1611*4882a593Smuzhiyun }
1612*4882a593Smuzhiyun 
1613*4882a593Smuzhiyun int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1614*4882a593Smuzhiyun                                   struct kvm_sregs *sregs)
1615*4882a593Smuzhiyun {
1616*4882a593Smuzhiyun 	int ret = -EINVAL;
1617*4882a593Smuzhiyun 
1618*4882a593Smuzhiyun 	vcpu_load(vcpu);
1619*4882a593Smuzhiyun 	if (vcpu->arch.pvr != sregs->pvr)
1620*4882a593Smuzhiyun 		goto out;
1621*4882a593Smuzhiyun 
1622*4882a593Smuzhiyun 	ret = set_sregs_base(vcpu, sregs);
1623*4882a593Smuzhiyun 	if (ret < 0)
1624*4882a593Smuzhiyun 		goto out;
1625*4882a593Smuzhiyun 
1626*4882a593Smuzhiyun 	ret = set_sregs_arch206(vcpu, sregs);
1627*4882a593Smuzhiyun 	if (ret < 0)
1628*4882a593Smuzhiyun 		goto out;
1629*4882a593Smuzhiyun 
1630*4882a593Smuzhiyun 	ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
1631*4882a593Smuzhiyun 
1632*4882a593Smuzhiyun out:
1633*4882a593Smuzhiyun 	vcpu_put(vcpu);
1634*4882a593Smuzhiyun 	return ret;
1635*4882a593Smuzhiyun }
1636*4882a593Smuzhiyun 
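/*
 * ONE_REG accessors: unlike the bulk regs/sregs ioctls above, each
 * register is addressed individually by a KVM_REG_PPC_* id; unknown ids
 * fall through to the core-specific kvm_ops implementation.  Illustrative
 * userspace use (a sketch, assuming an open vcpu fd):
 *
 *	__u64 tcr;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_TCR,
 *		.addr = (__u64)(uintptr_t)&tcr,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */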
1637*4882a593Smuzhiyun int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
1638*4882a593Smuzhiyun 			union kvmppc_one_reg *val)
1639*4882a593Smuzhiyun {
1640*4882a593Smuzhiyun 	int r = 0;
1641*4882a593Smuzhiyun 
1642*4882a593Smuzhiyun 	switch (id) {
1643*4882a593Smuzhiyun 	case KVM_REG_PPC_IAC1:
1644*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac1);
1645*4882a593Smuzhiyun 		break;
1646*4882a593Smuzhiyun 	case KVM_REG_PPC_IAC2:
1647*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac2);
1648*4882a593Smuzhiyun 		break;
1649*4882a593Smuzhiyun #if CONFIG_PPC_ADV_DEBUG_IACS > 2
1650*4882a593Smuzhiyun 	case KVM_REG_PPC_IAC3:
1651*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac3);
1652*4882a593Smuzhiyun 		break;
1653*4882a593Smuzhiyun 	case KVM_REG_PPC_IAC4:
1654*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac4);
1655*4882a593Smuzhiyun 		break;
1656*4882a593Smuzhiyun #endif
1657*4882a593Smuzhiyun 	case KVM_REG_PPC_DAC1:
1658*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.dbg_reg.dac1);
1659*4882a593Smuzhiyun 		break;
1660*4882a593Smuzhiyun 	case KVM_REG_PPC_DAC2:
1661*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.dbg_reg.dac2);
1662*4882a593Smuzhiyun 		break;
1663*4882a593Smuzhiyun 	case KVM_REG_PPC_EPR: {
1664*4882a593Smuzhiyun 		u32 epr = kvmppc_get_epr(vcpu);
1665*4882a593Smuzhiyun 		*val = get_reg_val(id, epr);
1666*4882a593Smuzhiyun 		break;
1667*4882a593Smuzhiyun 	}
1668*4882a593Smuzhiyun #if defined(CONFIG_64BIT)
1669*4882a593Smuzhiyun 	case KVM_REG_PPC_EPCR:
1670*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.epcr);
1671*4882a593Smuzhiyun 		break;
1672*4882a593Smuzhiyun #endif
1673*4882a593Smuzhiyun 	case KVM_REG_PPC_TCR:
1674*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.tcr);
1675*4882a593Smuzhiyun 		break;
1676*4882a593Smuzhiyun 	case KVM_REG_PPC_TSR:
1677*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.tsr);
1678*4882a593Smuzhiyun 		break;
1679*4882a593Smuzhiyun 	case KVM_REG_PPC_DEBUG_INST:
1680*4882a593Smuzhiyun 		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
1681*4882a593Smuzhiyun 		break;
1682*4882a593Smuzhiyun 	case KVM_REG_PPC_VRSAVE:
1683*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.vrsave);
1684*4882a593Smuzhiyun 		break;
1685*4882a593Smuzhiyun 	default:
1686*4882a593Smuzhiyun 		r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
1687*4882a593Smuzhiyun 		break;
1688*4882a593Smuzhiyun 	}
1689*4882a593Smuzhiyun 
1690*4882a593Smuzhiyun 	return r;
1691*4882a593Smuzhiyun }
1692*4882a593Smuzhiyun 
1693*4882a593Smuzhiyun int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
1694*4882a593Smuzhiyun 			union kvmppc_one_reg *val)
1695*4882a593Smuzhiyun {
1696*4882a593Smuzhiyun 	int r = 0;
1697*4882a593Smuzhiyun 
1698*4882a593Smuzhiyun 	switch (id) {
1699*4882a593Smuzhiyun 	case KVM_REG_PPC_IAC1:
1700*4882a593Smuzhiyun 		vcpu->arch.dbg_reg.iac1 = set_reg_val(id, *val);
1701*4882a593Smuzhiyun 		break;
1702*4882a593Smuzhiyun 	case KVM_REG_PPC_IAC2:
1703*4882a593Smuzhiyun 		vcpu->arch.dbg_reg.iac2 = set_reg_val(id, *val);
1704*4882a593Smuzhiyun 		break;
1705*4882a593Smuzhiyun #if CONFIG_PPC_ADV_DEBUG_IACS > 2
1706*4882a593Smuzhiyun 	case KVM_REG_PPC_IAC3:
1707*4882a593Smuzhiyun 		vcpu->arch.dbg_reg.iac3 = set_reg_val(id, *val);
1708*4882a593Smuzhiyun 		break;
1709*4882a593Smuzhiyun 	case KVM_REG_PPC_IAC4:
1710*4882a593Smuzhiyun 		vcpu->arch.dbg_reg.iac4 = set_reg_val(id, *val);
1711*4882a593Smuzhiyun 		break;
1712*4882a593Smuzhiyun #endif
1713*4882a593Smuzhiyun 	case KVM_REG_PPC_DAC1:
1714*4882a593Smuzhiyun 		vcpu->arch.dbg_reg.dac1 = set_reg_val(id, *val);
1715*4882a593Smuzhiyun 		break;
1716*4882a593Smuzhiyun 	case KVM_REG_PPC_DAC2:
1717*4882a593Smuzhiyun 		vcpu->arch.dbg_reg.dac2 = set_reg_val(id, *val);
1718*4882a593Smuzhiyun 		break;
1719*4882a593Smuzhiyun 	case KVM_REG_PPC_EPR: {
1720*4882a593Smuzhiyun 		u32 new_epr = set_reg_val(id, *val);
1721*4882a593Smuzhiyun 		kvmppc_set_epr(vcpu, new_epr);
1722*4882a593Smuzhiyun 		break;
1723*4882a593Smuzhiyun 	}
1724*4882a593Smuzhiyun #if defined(CONFIG_64BIT)
1725*4882a593Smuzhiyun 	case KVM_REG_PPC_EPCR: {
1726*4882a593Smuzhiyun 		u32 new_epcr = set_reg_val(id, *val);
1727*4882a593Smuzhiyun 		kvmppc_set_epcr(vcpu, new_epcr);
1728*4882a593Smuzhiyun 		break;
1729*4882a593Smuzhiyun 	}
1730*4882a593Smuzhiyun #endif
1731*4882a593Smuzhiyun 	case KVM_REG_PPC_OR_TSR: {
1732*4882a593Smuzhiyun 		u32 tsr_bits = set_reg_val(id, *val);
1733*4882a593Smuzhiyun 		kvmppc_set_tsr_bits(vcpu, tsr_bits);
1734*4882a593Smuzhiyun 		break;
1735*4882a593Smuzhiyun 	}
1736*4882a593Smuzhiyun 	case KVM_REG_PPC_CLEAR_TSR: {
1737*4882a593Smuzhiyun 		u32 tsr_bits = set_reg_val(id, *val);
1738*4882a593Smuzhiyun 		kvmppc_clr_tsr_bits(vcpu, tsr_bits);
1739*4882a593Smuzhiyun 		break;
1740*4882a593Smuzhiyun 	}
1741*4882a593Smuzhiyun 	case KVM_REG_PPC_TSR: {
1742*4882a593Smuzhiyun 		u32 tsr = set_reg_val(id, *val);
1743*4882a593Smuzhiyun 		kvmppc_set_tsr(vcpu, tsr);
1744*4882a593Smuzhiyun 		break;
1745*4882a593Smuzhiyun 	}
1746*4882a593Smuzhiyun 	case KVM_REG_PPC_TCR: {
1747*4882a593Smuzhiyun 		u32 tcr = set_reg_val(id, *val);
1748*4882a593Smuzhiyun 		kvmppc_set_tcr(vcpu, tcr);
1749*4882a593Smuzhiyun 		break;
1750*4882a593Smuzhiyun 	}
1751*4882a593Smuzhiyun 	case KVM_REG_PPC_VRSAVE:
1752*4882a593Smuzhiyun 		vcpu->arch.vrsave = set_reg_val(id, *val);
1753*4882a593Smuzhiyun 		break;
1754*4882a593Smuzhiyun 	default:
1755*4882a593Smuzhiyun 		r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
1756*4882a593Smuzhiyun 		break;
1757*4882a593Smuzhiyun 	}
1758*4882a593Smuzhiyun 
1759*4882a593Smuzhiyun 	return r;
1760*4882a593Smuzhiyun }
1761*4882a593Smuzhiyun 
1762*4882a593Smuzhiyun int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1763*4882a593Smuzhiyun {
1764*4882a593Smuzhiyun 	return -EOPNOTSUPP;
1765*4882a593Smuzhiyun }
1766*4882a593Smuzhiyun 
1767*4882a593Smuzhiyun int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1768*4882a593Smuzhiyun {
1769*4882a593Smuzhiyun 	return -EOPNOTSUPP;
1770*4882a593Smuzhiyun }
1771*4882a593Smuzhiyun 
1772*4882a593Smuzhiyun int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1773*4882a593Smuzhiyun                                   struct kvm_translation *tr)
1774*4882a593Smuzhiyun {
1775*4882a593Smuzhiyun 	int r;
1776*4882a593Smuzhiyun 
1777*4882a593Smuzhiyun 	vcpu_load(vcpu);
1778*4882a593Smuzhiyun 	r = kvmppc_core_vcpu_translate(vcpu, tr);
1779*4882a593Smuzhiyun 	vcpu_put(vcpu);
1780*4882a593Smuzhiyun 	return r;
1781*4882a593Smuzhiyun }
1782*4882a593Smuzhiyun 
1783*4882a593Smuzhiyun void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
1784*4882a593Smuzhiyun {
1785*4882a593Smuzhiyun 
1786*4882a593Smuzhiyun }
1787*4882a593Smuzhiyun 
1788*4882a593Smuzhiyun int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
1789*4882a593Smuzhiyun {
1790*4882a593Smuzhiyun 	return -EOPNOTSUPP;
1791*4882a593Smuzhiyun }
1792*4882a593Smuzhiyun 
1793*4882a593Smuzhiyun void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
1794*4882a593Smuzhiyun {
1795*4882a593Smuzhiyun }
1796*4882a593Smuzhiyun 
1797*4882a593Smuzhiyun int kvmppc_core_prepare_memory_region(struct kvm *kvm,
1798*4882a593Smuzhiyun 				      struct kvm_memory_slot *memslot,
1799*4882a593Smuzhiyun 				      const struct kvm_userspace_memory_region *mem,
1800*4882a593Smuzhiyun 				      enum kvm_mr_change change)
1801*4882a593Smuzhiyun {
1802*4882a593Smuzhiyun 	return 0;
1803*4882a593Smuzhiyun }
1804*4882a593Smuzhiyun 
1805*4882a593Smuzhiyun void kvmppc_core_commit_memory_region(struct kvm *kvm,
1806*4882a593Smuzhiyun 				const struct kvm_userspace_memory_region *mem,
1807*4882a593Smuzhiyun 				const struct kvm_memory_slot *old,
1808*4882a593Smuzhiyun 				const struct kvm_memory_slot *new,
1809*4882a593Smuzhiyun 				enum kvm_mr_change change)
1810*4882a593Smuzhiyun {
1811*4882a593Smuzhiyun }
1812*4882a593Smuzhiyun 
1813*4882a593Smuzhiyun void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
1814*4882a593Smuzhiyun {
1815*4882a593Smuzhiyun }
1816*4882a593Smuzhiyun 
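/*
 * EPCR only exists on 64-bit cores.  On HV, the guest's choice of
 * interrupt computation mode (EPCR[ICM]) is mirrored into the shadow
 * EPCR as GICM so guest interrupts are delivered in the requested mode.
 */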
1817*4882a593Smuzhiyun void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
1818*4882a593Smuzhiyun {
1819*4882a593Smuzhiyun #if defined(CONFIG_64BIT)
1820*4882a593Smuzhiyun 	vcpu->arch.epcr = new_epcr;
1821*4882a593Smuzhiyun #ifdef CONFIG_KVM_BOOKE_HV
1822*4882a593Smuzhiyun 	vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
1823*4882a593Smuzhiyun 	if (vcpu->arch.epcr  & SPRN_EPCR_ICM)
1824*4882a593Smuzhiyun 		vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
1825*4882a593Smuzhiyun #endif
1826*4882a593Smuzhiyun #endif
1827*4882a593Smuzhiyun }
1828*4882a593Smuzhiyun 
1829*4882a593Smuzhiyun void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
1830*4882a593Smuzhiyun {
1831*4882a593Smuzhiyun 	vcpu->arch.tcr = new_tcr;
1832*4882a593Smuzhiyun 	arm_next_watchdog(vcpu);
1833*4882a593Smuzhiyun 	update_timer_ints(vcpu);
1834*4882a593Smuzhiyun }
1835*4882a593Smuzhiyun 
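/*
 * Set TSR bits, possibly from outside the vcpu thread.  The write barrier
 * orders the TSR update before the KVM_REQ_PENDING_TIMER request, and the
 * kick wakes the vcpu so it notices the new request promptly.
 */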
1836*4882a593Smuzhiyun void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
1837*4882a593Smuzhiyun {
1838*4882a593Smuzhiyun 	set_bits(tsr_bits, &vcpu->arch.tsr);
1839*4882a593Smuzhiyun 	smp_wmb();
1840*4882a593Smuzhiyun 	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
1841*4882a593Smuzhiyun 	kvm_vcpu_kick(vcpu);
1842*4882a593Smuzhiyun }
1843*4882a593Smuzhiyun 
1844*4882a593Smuzhiyun void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
1845*4882a593Smuzhiyun {
1846*4882a593Smuzhiyun 	clear_bits(tsr_bits, &vcpu->arch.tsr);
1847*4882a593Smuzhiyun 
1848*4882a593Smuzhiyun 	/*
1849*4882a593Smuzhiyun 	 * We may have stopped the watchdog due to
1850*4882a593Smuzhiyun 	 * being stuck on final expiration.
1851*4882a593Smuzhiyun 	 */
1852*4882a593Smuzhiyun 	if (tsr_bits & (TSR_ENW | TSR_WIS))
1853*4882a593Smuzhiyun 		arm_next_watchdog(vcpu);
1854*4882a593Smuzhiyun 
1855*4882a593Smuzhiyun 	update_timer_ints(vcpu);
1856*4882a593Smuzhiyun }
1857*4882a593Smuzhiyun 
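/*
 * Decrementer expiry.  With auto-reload (TCR[ARE]) the decrementer is
 * reloaded from DECAR and restarted; TSR[DIS] is then raised to signal
 * the pending decrementer interrupt.
 */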
1858*4882a593Smuzhiyun void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
1859*4882a593Smuzhiyun {
1860*4882a593Smuzhiyun 	if (vcpu->arch.tcr & TCR_ARE) {
1861*4882a593Smuzhiyun 		vcpu->arch.dec = vcpu->arch.decar;
1862*4882a593Smuzhiyun 		kvmppc_emulate_dec(vcpu);
1863*4882a593Smuzhiyun 	}
1864*4882a593Smuzhiyun 
1865*4882a593Smuzhiyun 	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
1866*4882a593Smuzhiyun }
1867*4882a593Smuzhiyun 
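/*
 * Program one hardware instruction breakpoint: each slot maps to an IACn
 * address register plus its DBCR0 enable bit, and DBCR0[IDM] switches on
 * internal debug mode once any slot is in use.
 */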
1868*4882a593Smuzhiyun static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
1869*4882a593Smuzhiyun 				       uint64_t addr, int index)
1870*4882a593Smuzhiyun {
1871*4882a593Smuzhiyun 	switch (index) {
1872*4882a593Smuzhiyun 	case 0:
1873*4882a593Smuzhiyun 		dbg_reg->dbcr0 |= DBCR0_IAC1;
1874*4882a593Smuzhiyun 		dbg_reg->iac1 = addr;
1875*4882a593Smuzhiyun 		break;
1876*4882a593Smuzhiyun 	case 1:
1877*4882a593Smuzhiyun 		dbg_reg->dbcr0 |= DBCR0_IAC2;
1878*4882a593Smuzhiyun 		dbg_reg->iac2 = addr;
1879*4882a593Smuzhiyun 		break;
1880*4882a593Smuzhiyun #if CONFIG_PPC_ADV_DEBUG_IACS > 2
1881*4882a593Smuzhiyun 	case 2:
1882*4882a593Smuzhiyun 		dbg_reg->dbcr0 |= DBCR0_IAC3;
1883*4882a593Smuzhiyun 		dbg_reg->iac3 = addr;
1884*4882a593Smuzhiyun 		break;
1885*4882a593Smuzhiyun 	case 3:
1886*4882a593Smuzhiyun 		dbg_reg->dbcr0 |= DBCR0_IAC4;
1887*4882a593Smuzhiyun 		dbg_reg->iac4 = addr;
1888*4882a593Smuzhiyun 		break;
1889*4882a593Smuzhiyun #endif
1890*4882a593Smuzhiyun 	default:
1891*4882a593Smuzhiyun 		return -EINVAL;
1892*4882a593Smuzhiyun 	}
1893*4882a593Smuzhiyun 
1894*4882a593Smuzhiyun 	dbg_reg->dbcr0 |= DBCR0_IDM;
1895*4882a593Smuzhiyun 	return 0;
1896*4882a593Smuzhiyun }
1897*4882a593Smuzhiyun 
1898*4882a593Smuzhiyun static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
1899*4882a593Smuzhiyun 				       int type, int index)
1900*4882a593Smuzhiyun {
1901*4882a593Smuzhiyun 	switch (index) {
1902*4882a593Smuzhiyun 	case 0:
1903*4882a593Smuzhiyun 		if (type & KVMPPC_DEBUG_WATCH_READ)
1904*4882a593Smuzhiyun 			dbg_reg->dbcr0 |= DBCR0_DAC1R;
1905*4882a593Smuzhiyun 		if (type & KVMPPC_DEBUG_WATCH_WRITE)
1906*4882a593Smuzhiyun 			dbg_reg->dbcr0 |= DBCR0_DAC1W;
1907*4882a593Smuzhiyun 		dbg_reg->dac1 = addr;
1908*4882a593Smuzhiyun 		break;
1909*4882a593Smuzhiyun 	case 1:
1910*4882a593Smuzhiyun 		if (type & KVMPPC_DEBUG_WATCH_READ)
1911*4882a593Smuzhiyun 			dbg_reg->dbcr0 |= DBCR0_DAC2R;
1912*4882a593Smuzhiyun 		if (type & KVMPPC_DEBUG_WATCH_WRITE)
1913*4882a593Smuzhiyun 			dbg_reg->dbcr0 |= DBCR0_DAC2W;
1914*4882a593Smuzhiyun 		dbg_reg->dac2 = addr;
1915*4882a593Smuzhiyun 		break;
1916*4882a593Smuzhiyun 	default:
1917*4882a593Smuzhiyun 		return -EINVAL;
1918*4882a593Smuzhiyun 	}
1919*4882a593Smuzhiyun 
1920*4882a593Smuzhiyun 	dbg_reg->dbcr0 |= DBCR0_IDM;
1921*4882a593Smuzhiyun 	return 0;
1922*4882a593Smuzhiyun }
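/*
 * On BookE-HV, the MSRP bits make the corresponding guest MSR bits
 * privileged: with UCLEP/DEP/PMMP set, guest accesses to MSR[UCLE],
 * MSR[DE] or MSR[PMM] trap to the hypervisor instead of taking effect
 * directly.
 */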
1923*4882a593Smuzhiyun void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
1924*4882a593Smuzhiyun {
1925*4882a593Smuzhiyun 	/* XXX: Add similar MSR protection for BookE-PR */
1926*4882a593Smuzhiyun #ifdef CONFIG_KVM_BOOKE_HV
1927*4882a593Smuzhiyun 	BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP));
1928*4882a593Smuzhiyun 	if (set) {
1929*4882a593Smuzhiyun 		if (prot_bitmap & MSR_UCLE)
1930*4882a593Smuzhiyun 			vcpu->arch.shadow_msrp |= MSRP_UCLEP;
1931*4882a593Smuzhiyun 		if (prot_bitmap & MSR_DE)
1932*4882a593Smuzhiyun 			vcpu->arch.shadow_msrp |= MSRP_DEP;
1933*4882a593Smuzhiyun 		if (prot_bitmap & MSR_PMM)
1934*4882a593Smuzhiyun 			vcpu->arch.shadow_msrp |= MSRP_PMMP;
1935*4882a593Smuzhiyun 	} else {
1936*4882a593Smuzhiyun 		if (prot_bitmap & MSR_UCLE)
1937*4882a593Smuzhiyun 			vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
1938*4882a593Smuzhiyun 		if (prot_bitmap & MSR_DE)
1939*4882a593Smuzhiyun 			vcpu->arch.shadow_msrp &= ~MSRP_DEP;
1940*4882a593Smuzhiyun 		if (prot_bitmap & MSR_PMM)
1941*4882a593Smuzhiyun 			vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
1942*4882a593Smuzhiyun 	}
1943*4882a593Smuzhiyun #endif
1944*4882a593Smuzhiyun }
1945*4882a593Smuzhiyun 
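/*
 * Translate a guest effective address by walking the guest TLB
 * (instruction or data side, per xlid).  The e500v2 magic page is
 * short-circuited first, and permissions are currently reported as
 * all-allowed (see the XXX below).
 */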
1946*4882a593Smuzhiyun int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
1947*4882a593Smuzhiyun 		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
1948*4882a593Smuzhiyun {
1949*4882a593Smuzhiyun 	int gtlb_index;
1950*4882a593Smuzhiyun 	gpa_t gpaddr;
1951*4882a593Smuzhiyun 
1952*4882a593Smuzhiyun #ifdef CONFIG_KVM_E500V2
1953*4882a593Smuzhiyun 	if (!(vcpu->arch.shared->msr & MSR_PR) &&
1954*4882a593Smuzhiyun 	    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
1955*4882a593Smuzhiyun 		pte->eaddr = eaddr;
1956*4882a593Smuzhiyun 		pte->raddr = (vcpu->arch.magic_page_pa & PAGE_MASK) |
1957*4882a593Smuzhiyun 			     (eaddr & ~PAGE_MASK);
1958*4882a593Smuzhiyun 		pte->vpage = eaddr >> PAGE_SHIFT;
1959*4882a593Smuzhiyun 		pte->may_read = true;
1960*4882a593Smuzhiyun 		pte->may_write = true;
1961*4882a593Smuzhiyun 		pte->may_execute = true;
1962*4882a593Smuzhiyun 
1963*4882a593Smuzhiyun 		return 0;
1964*4882a593Smuzhiyun 	}
1965*4882a593Smuzhiyun #endif
1966*4882a593Smuzhiyun 
1967*4882a593Smuzhiyun 	/* Check the guest TLB. */
1968*4882a593Smuzhiyun 	switch (xlid) {
1969*4882a593Smuzhiyun 	case XLATE_INST:
1970*4882a593Smuzhiyun 		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
1971*4882a593Smuzhiyun 		break;
1972*4882a593Smuzhiyun 	case XLATE_DATA:
1973*4882a593Smuzhiyun 		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
1974*4882a593Smuzhiyun 		break;
1975*4882a593Smuzhiyun 	default:
1976*4882a593Smuzhiyun 		BUG();
1977*4882a593Smuzhiyun 	}
1978*4882a593Smuzhiyun 
1979*4882a593Smuzhiyun 	/* Do we have a TLB entry at all? */
1980*4882a593Smuzhiyun 	if (gtlb_index < 0)
1981*4882a593Smuzhiyun 		return -ENOENT;
1982*4882a593Smuzhiyun 
1983*4882a593Smuzhiyun 	gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
1984*4882a593Smuzhiyun 
1985*4882a593Smuzhiyun 	pte->eaddr = eaddr;
1986*4882a593Smuzhiyun 	pte->raddr = (gpaddr & PAGE_MASK) | (eaddr & ~PAGE_MASK);
1987*4882a593Smuzhiyun 	pte->vpage = eaddr >> PAGE_SHIFT;
1988*4882a593Smuzhiyun 
1989*4882a593Smuzhiyun 	/* XXX read permissions from the guest TLB */
1990*4882a593Smuzhiyun 	pte->may_read = true;
1991*4882a593Smuzhiyun 	pte->may_write = true;
1992*4882a593Smuzhiyun 	pte->may_execute = true;
1993*4882a593Smuzhiyun 
1994*4882a593Smuzhiyun 	return 0;
1995*4882a593Smuzhiyun }
1996*4882a593Smuzhiyun 
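/*
 * KVM_SET_GUEST_DEBUG backend.  Single-stepping uses DBCR0[IC]; HW
 * breakpoints and watchpoints from dbg->arch.bp[] are packed into the
 * IAC/DAC slots via the helpers above.  Disabling debug clears dbcr0 and
 * drops the MSR[DE] protection again.
 */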
1997*4882a593Smuzhiyun int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1998*4882a593Smuzhiyun 					 struct kvm_guest_debug *dbg)
1999*4882a593Smuzhiyun {
2000*4882a593Smuzhiyun 	struct debug_reg *dbg_reg;
2001*4882a593Smuzhiyun 	int n, b = 0, w = 0;
2002*4882a593Smuzhiyun 	int ret = 0;
2003*4882a593Smuzhiyun 
2004*4882a593Smuzhiyun 	vcpu_load(vcpu);
2005*4882a593Smuzhiyun 
2006*4882a593Smuzhiyun 	if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
2007*4882a593Smuzhiyun 		vcpu->arch.dbg_reg.dbcr0 = 0;
2008*4882a593Smuzhiyun 		vcpu->guest_debug = 0;
2009*4882a593Smuzhiyun 		kvm_guest_protect_msr(vcpu, MSR_DE, false);
2010*4882a593Smuzhiyun 		goto out;
2011*4882a593Smuzhiyun 	}
2012*4882a593Smuzhiyun 
2013*4882a593Smuzhiyun 	kvm_guest_protect_msr(vcpu, MSR_DE, true);
2014*4882a593Smuzhiyun 	vcpu->guest_debug = dbg->control;
2015*4882a593Smuzhiyun 	vcpu->arch.dbg_reg.dbcr0 = 0;
2016*4882a593Smuzhiyun 
2017*4882a593Smuzhiyun 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
2018*4882a593Smuzhiyun 		vcpu->arch.dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;
2019*4882a593Smuzhiyun 
2020*4882a593Smuzhiyun 	/* The code below handles only HW breakpoints and watchpoints */
2021*4882a593Smuzhiyun 	dbg_reg = &(vcpu->arch.dbg_reg);
2022*4882a593Smuzhiyun 
2023*4882a593Smuzhiyun #ifdef CONFIG_KVM_BOOKE_HV
2024*4882a593Smuzhiyun 	/*
2025*4882a593Smuzhiyun 	 * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1.
2026*4882a593Smuzhiyun 	 * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0.
2027*4882a593Smuzhiyun 	 */
2028*4882a593Smuzhiyun 	dbg_reg->dbcr1 = 0;
2029*4882a593Smuzhiyun 	dbg_reg->dbcr2 = 0;
2030*4882a593Smuzhiyun #else
2031*4882a593Smuzhiyun 	/*
2032*4882a593Smuzhiyun 	 * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1.
2033*4882a593Smuzhiyun 	 * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR
2034*4882a593Smuzhiyun 	 * is set.
2035*4882a593Smuzhiyun 	 */
2036*4882a593Smuzhiyun 	dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
2037*4882a593Smuzhiyun 			  DBCR1_IAC4US;
2038*4882a593Smuzhiyun 	dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
2039*4882a593Smuzhiyun #endif
2040*4882a593Smuzhiyun 
2041*4882a593Smuzhiyun 	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
2042*4882a593Smuzhiyun 		goto out;
2043*4882a593Smuzhiyun 
2044*4882a593Smuzhiyun 	ret = -EINVAL;
2045*4882a593Smuzhiyun 	for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
2046*4882a593Smuzhiyun 		uint64_t addr = dbg->arch.bp[n].addr;
2047*4882a593Smuzhiyun 		uint32_t type = dbg->arch.bp[n].type;
2048*4882a593Smuzhiyun 
2049*4882a593Smuzhiyun 		if (type == KVMPPC_DEBUG_NONE)
2050*4882a593Smuzhiyun 			continue;
2051*4882a593Smuzhiyun 
2052*4882a593Smuzhiyun 		if (type & ~(KVMPPC_DEBUG_WATCH_READ |
2053*4882a593Smuzhiyun 			     KVMPPC_DEBUG_WATCH_WRITE |
2054*4882a593Smuzhiyun 			     KVMPPC_DEBUG_BREAKPOINT))
2055*4882a593Smuzhiyun 			goto out;
2056*4882a593Smuzhiyun 
2057*4882a593Smuzhiyun 		if (type & KVMPPC_DEBUG_BREAKPOINT) {
2058*4882a593Smuzhiyun 			/* Setting H/W breakpoint */
2059*4882a593Smuzhiyun 			if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
2060*4882a593Smuzhiyun 				goto out;
2061*4882a593Smuzhiyun 		} else {
2062*4882a593Smuzhiyun 			/* Setting H/W watchpoint */
2063*4882a593Smuzhiyun 			if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
2064*4882a593Smuzhiyun 							type, w++))
2065*4882a593Smuzhiyun 				goto out;
2066*4882a593Smuzhiyun 		}
2067*4882a593Smuzhiyun 	}
2068*4882a593Smuzhiyun 
2069*4882a593Smuzhiyun 	ret = 0;
2070*4882a593Smuzhiyun out:
2071*4882a593Smuzhiyun 	vcpu_put(vcpu);
2072*4882a593Smuzhiyun 	return ret;
2073*4882a593Smuzhiyun }
2074*4882a593Smuzhiyun 
2075*4882a593Smuzhiyun void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2076*4882a593Smuzhiyun {
2077*4882a593Smuzhiyun 	vcpu->cpu = smp_processor_id();
2078*4882a593Smuzhiyun 	current->thread.kvm_vcpu = vcpu;
2079*4882a593Smuzhiyun }
2080*4882a593Smuzhiyun 
2081*4882a593Smuzhiyun void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
2082*4882a593Smuzhiyun {
2083*4882a593Smuzhiyun 	current->thread.kvm_vcpu = NULL;
2084*4882a593Smuzhiyun 	vcpu->cpu = -1;
2085*4882a593Smuzhiyun 
2086*4882a593Smuzhiyun 	/* Clear pending debug event in DBSR */
2087*4882a593Smuzhiyun 	kvmppc_clear_dbsr();
2088*4882a593Smuzhiyun }
2089*4882a593Smuzhiyun 
2090*4882a593Smuzhiyun int kvmppc_core_init_vm(struct kvm *kvm)
2091*4882a593Smuzhiyun {
2092*4882a593Smuzhiyun 	return kvm->arch.kvm_ops->init_vm(kvm);
2093*4882a593Smuzhiyun }
2094*4882a593Smuzhiyun 
2095*4882a593Smuzhiyun int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu)
2096*4882a593Smuzhiyun {
2097*4882a593Smuzhiyun 	int i;
2098*4882a593Smuzhiyun 	int r;
2099*4882a593Smuzhiyun 
2100*4882a593Smuzhiyun 	r = vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu);
2101*4882a593Smuzhiyun 	if (r)
2102*4882a593Smuzhiyun 		return r;
2103*4882a593Smuzhiyun 
2104*4882a593Smuzhiyun 	/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
2105*4882a593Smuzhiyun 	vcpu->arch.regs.nip = 0;
2106*4882a593Smuzhiyun 	vcpu->arch.shared->pir = vcpu->vcpu_id;
2107*4882a593Smuzhiyun 	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
2108*4882a593Smuzhiyun 	kvmppc_set_msr(vcpu, 0);
2109*4882a593Smuzhiyun 
2110*4882a593Smuzhiyun #ifndef CONFIG_KVM_BOOKE_HV
2111*4882a593Smuzhiyun 	vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
2112*4882a593Smuzhiyun 	vcpu->arch.shadow_pid = 1;
2113*4882a593Smuzhiyun 	vcpu->arch.shared->msr = 0;
2114*4882a593Smuzhiyun #endif
2115*4882a593Smuzhiyun 
2116*4882a593Smuzhiyun 	/* Eye-catching numbers so we know if the guest takes an interrupt
2117*4882a593Smuzhiyun 	 * before it's programmed its own IVPR/IVORs. */
2118*4882a593Smuzhiyun 	vcpu->arch.ivpr = 0x55550000;
2119*4882a593Smuzhiyun 	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
2120*4882a593Smuzhiyun 		vcpu->arch.ivor[i] = 0x7700 | i * 4;
2121*4882a593Smuzhiyun 
2122*4882a593Smuzhiyun 	kvmppc_init_timing_stats(vcpu);
2123*4882a593Smuzhiyun 
2124*4882a593Smuzhiyun 	r = kvmppc_core_vcpu_setup(vcpu);
2125*4882a593Smuzhiyun 	if (r)
2126*4882a593Smuzhiyun 		vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
2127*4882a593Smuzhiyun 	kvmppc_sanity_check(vcpu);
2128*4882a593Smuzhiyun 	return r;
2129*4882a593Smuzhiyun }
2130*4882a593Smuzhiyun 
2131*4882a593Smuzhiyun void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
2132*4882a593Smuzhiyun {
2133*4882a593Smuzhiyun 	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
2134*4882a593Smuzhiyun }
2135*4882a593Smuzhiyun 
2136*4882a593Smuzhiyun void kvmppc_core_destroy_vm(struct kvm *kvm)
2137*4882a593Smuzhiyun {
2138*4882a593Smuzhiyun 	kvm->arch.kvm_ops->destroy_vm(kvm);
2139*4882a593Smuzhiyun }
2140*4882a593Smuzhiyun 
2141*4882a593Smuzhiyun void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2142*4882a593Smuzhiyun {
2143*4882a593Smuzhiyun 	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
2144*4882a593Smuzhiyun }
2145*4882a593Smuzhiyun 
2146*4882a593Smuzhiyun void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
2147*4882a593Smuzhiyun {
2148*4882a593Smuzhiyun 	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
2149*4882a593Smuzhiyun }
2150*4882a593Smuzhiyun 
2151*4882a593Smuzhiyun int __init kvmppc_booke_init(void)
2152*4882a593Smuzhiyun {
2153*4882a593Smuzhiyun #ifndef CONFIG_KVM_BOOKE_HV
2154*4882a593Smuzhiyun 	unsigned long ivor[16];
2155*4882a593Smuzhiyun 	unsigned long *handler = kvmppc_booke_handler_addr;
2156*4882a593Smuzhiyun 	unsigned long max_ivor = 0;
2157*4882a593Smuzhiyun 	unsigned long handler_len;
2158*4882a593Smuzhiyun 	int i;
2159*4882a593Smuzhiyun 
2160*4882a593Smuzhiyun 	/* We install our own exception handlers by hijacking IVPR. IVPR holds
2161*4882a593Smuzhiyun 	 * only the upper 16 bits of the base, so we need a 64KB allocation. */
2162*4882a593Smuzhiyun 	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
2163*4882a593Smuzhiyun 	                                         VCPU_SIZE_ORDER);
2164*4882a593Smuzhiyun 	if (!kvmppc_booke_handlers)
2165*4882a593Smuzhiyun 		return -ENOMEM;
2166*4882a593Smuzhiyun 
2167*4882a593Smuzhiyun 	/* XXX make sure our handlers are smaller than Linux's */
2168*4882a593Smuzhiyun 
2169*4882a593Smuzhiyun 	/* Copy our interrupt handlers to match host IVORs. That way we don't
2170*4882a593Smuzhiyun 	 * have to swap the IVORs on every guest/host transition. */
2171*4882a593Smuzhiyun 	ivor[0] = mfspr(SPRN_IVOR0);
2172*4882a593Smuzhiyun 	ivor[1] = mfspr(SPRN_IVOR1);
2173*4882a593Smuzhiyun 	ivor[2] = mfspr(SPRN_IVOR2);
2174*4882a593Smuzhiyun 	ivor[3] = mfspr(SPRN_IVOR3);
2175*4882a593Smuzhiyun 	ivor[4] = mfspr(SPRN_IVOR4);
2176*4882a593Smuzhiyun 	ivor[5] = mfspr(SPRN_IVOR5);
2177*4882a593Smuzhiyun 	ivor[6] = mfspr(SPRN_IVOR6);
2178*4882a593Smuzhiyun 	ivor[7] = mfspr(SPRN_IVOR7);
2179*4882a593Smuzhiyun 	ivor[8] = mfspr(SPRN_IVOR8);
2180*4882a593Smuzhiyun 	ivor[9] = mfspr(SPRN_IVOR9);
2181*4882a593Smuzhiyun 	ivor[10] = mfspr(SPRN_IVOR10);
2182*4882a593Smuzhiyun 	ivor[11] = mfspr(SPRN_IVOR11);
2183*4882a593Smuzhiyun 	ivor[12] = mfspr(SPRN_IVOR12);
2184*4882a593Smuzhiyun 	ivor[13] = mfspr(SPRN_IVOR13);
2185*4882a593Smuzhiyun 	ivor[14] = mfspr(SPRN_IVOR14);
2186*4882a593Smuzhiyun 	ivor[15] = mfspr(SPRN_IVOR15);
2187*4882a593Smuzhiyun 
2188*4882a593Smuzhiyun 	for (i = 0; i < 16; i++) {
2189*4882a593Smuzhiyun 		if (ivor[i] > ivor[max_ivor])	/* track the largest IVOR offset */
2190*4882a593Smuzhiyun 			max_ivor = i;
2191*4882a593Smuzhiyun 
2192*4882a593Smuzhiyun 		handler_len = handler[i + 1] - handler[i];
2193*4882a593Smuzhiyun 		memcpy((void *)kvmppc_booke_handlers + ivor[i],
2194*4882a593Smuzhiyun 		       (void *)handler[i], handler_len);
2195*4882a593Smuzhiyun 	}
2196*4882a593Smuzhiyun 
2197*4882a593Smuzhiyun 	handler_len = handler[max_ivor + 1] - handler[max_ivor];
2198*4882a593Smuzhiyun 	flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
2199*4882a593Smuzhiyun 			   ivor[max_ivor] + handler_len);
2200*4882a593Smuzhiyun #endif /* !BOOKE_HV */
2201*4882a593Smuzhiyun 	return 0;
2202*4882a593Smuzhiyun }
2203*4882a593Smuzhiyun 
2204*4882a593Smuzhiyun void __exit kvmppc_booke_exit(void)
2205*4882a593Smuzhiyun {
2206*4882a593Smuzhiyun 	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
2207*4882a593Smuzhiyun 	kvm_exit();
2208*4882a593Smuzhiyun }
2209