xref: /OK3568_Linux_fs/kernel/arch/powerpc/kvm/book3s_pr.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/setup.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <asm/asm-prototypes.h>
#include <asm/tm.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_pr.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);
#ifdef CONFIG_PPC_BOOK3S_64
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac);
#endif

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#define HPTE_R_M   _PAGE_COHERENT
#endif

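/*
 * "Split real mode" is the case where the guest runs with MSR_DR set
 * but MSR_IR clear, i.e. data accesses are translated while
 * instruction fetches are in real mode. PR KVM handles this by
 * offsetting real-mode fetch addresses into a dedicated range
 * (SPLIT_HACK_OFFS) so they can be mapped specially; see the fixup
 * helpers below.
 */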
static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	return (msr & (MSR_IR|MSR_DR)) == MSR_DR;
}

static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	ulong pc = kvmppc_get_pc(vcpu);

	/* Do nothing unless we are in DR-only split real mode */
	if ((msr & (MSR_IR|MSR_DR)) != MSR_DR)
		return;

	/* Do nothing if we have already fixed up the guest */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
		return;

	/* Do nothing unless the code is in fixupable address space */
	if (pc & SPLIT_HACK_MASK)
		return;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
	kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
}

static void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
		ulong pc = kvmppc_get_pc(vcpu);
		ulong lr = kvmppc_get_lr(vcpu);
		if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
		if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK);
		vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
	}
}

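/*
 * Deliver an interrupt into the guest: SRR0 gets the old PC, SRR1 the
 * old MSR bits plus the supplied flags, and execution continues at
 * the guest's HIOR-relative vector with the guest's interrupt MSR.
 */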
static void kvmppc_inject_interrupt_pr(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	unsigned long msr, pc, new_msr, new_pc;

	kvmppc_unfixup_split_real(vcpu);

	msr = kvmppc_get_msr(vcpu);
	pc = kvmppc_get_pc(vcpu);
	new_msr = vcpu->arch.intr_msr;
	new_pc = to_book3s(vcpu)->hior + vec;

#ifdef CONFIG_PPC_BOOK3S_64
	/* If transactional, change to suspend mode on IRQ delivery */
	if (MSR_TM_TRANSACTIONAL(msr))
		new_msr |= MSR_TS_S;
	else
		new_msr |= msr & MSR_TS_MASK;
#endif

	kvmppc_set_srr0(vcpu, pc);
	kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
	kvmppc_set_pc(vcpu, new_pc);
	kvmppc_set_msr(vcpu, new_msr);
}

static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu->in_use = 0;
	svcpu_put(svcpu);
#endif

	/* Disable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);

	vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif

	if (kvmppc_is_split_real(vcpu))
		kvmppc_fixup_split_real(vcpu);

	kvmppc_restore_tm_pr(vcpu);
}

static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	if (svcpu->in_use) {
		kvmppc_copy_from_svcpu(vcpu);
	}
	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

	if (kvmppc_is_split_real(vcpu))
		kvmppc_unfixup_split_real(vcpu);

	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	kvmppc_save_tm_pr(vcpu);

	/* Enable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);

	vcpu->cpu = -1;
}

/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

	svcpu->gpr[0] = vcpu->arch.regs.gpr[0];
	svcpu->gpr[1] = vcpu->arch.regs.gpr[1];
	svcpu->gpr[2] = vcpu->arch.regs.gpr[2];
	svcpu->gpr[3] = vcpu->arch.regs.gpr[3];
	svcpu->gpr[4] = vcpu->arch.regs.gpr[4];
	svcpu->gpr[5] = vcpu->arch.regs.gpr[5];
	svcpu->gpr[6] = vcpu->arch.regs.gpr[6];
	svcpu->gpr[7] = vcpu->arch.regs.gpr[7];
	svcpu->gpr[8] = vcpu->arch.regs.gpr[8];
	svcpu->gpr[9] = vcpu->arch.regs.gpr[9];
	svcpu->gpr[10] = vcpu->arch.regs.gpr[10];
	svcpu->gpr[11] = vcpu->arch.regs.gpr[11];
	svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
	svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
	svcpu->cr  = vcpu->arch.regs.ccr;
	svcpu->xer = vcpu->arch.regs.xer;
	svcpu->ctr = vcpu->arch.regs.ctr;
	svcpu->lr  = vcpu->arch.regs.link;
	svcpu->pc  = vcpu->arch.regs.nip;
#ifdef CONFIG_PPC_BOOK3S_64
	svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
#endif
	/*
	 * Now also save the current time base value. We use this
	 * to find the guest purr and spurr values.
	 */
	vcpu->arch.entry_tb = get_tb();
	vcpu->arch.entry_vtb = get_vtb();
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.entry_ic = mfspr(SPRN_IC);
	svcpu->in_use = true;

	svcpu_put(svcpu);
}

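/*
 * The shadow MSR is the MSR value the hardware actually runs with
 * while the guest executes: guest-controlled bits are filtered down
 * to a safe subset, and the bits the host requires (translation on,
 * problem state, machine check enable, ...) are forced on.
 */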
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong guest_msr = kvmppc_get_msr(vcpu);
	ulong smsr = guest_msr;

	/* Guest MSR values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE |
		MSR_TM | MSR_TS_MASK;
#else
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
#endif
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External providers (FPU/Altivec/VSX) the guest has reserved */
	smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * In guest privileged state we want all TM transactions to fail,
	 * so clear the MSR TM bit; every tbegin. will then trap into the
	 * host.
	 */
	if (!(guest_msr & MSR_PR))
		smsr &= ~MSR_TM;
#endif
	vcpu->arch.shadow_msr = smsr;
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	ulong old_msr;
#endif

	/*
	 * Maybe we were already preempted and synced the svcpu from
	 * our preempt notifiers. Don't bother touching this svcpu then.
	 */
	if (!svcpu->in_use)
		goto out;

	vcpu->arch.regs.gpr[0] = svcpu->gpr[0];
	vcpu->arch.regs.gpr[1] = svcpu->gpr[1];
	vcpu->arch.regs.gpr[2] = svcpu->gpr[2];
	vcpu->arch.regs.gpr[3] = svcpu->gpr[3];
	vcpu->arch.regs.gpr[4] = svcpu->gpr[4];
	vcpu->arch.regs.gpr[5] = svcpu->gpr[5];
	vcpu->arch.regs.gpr[6] = svcpu->gpr[6];
	vcpu->arch.regs.gpr[7] = svcpu->gpr[7];
	vcpu->arch.regs.gpr[8] = svcpu->gpr[8];
	vcpu->arch.regs.gpr[9] = svcpu->gpr[9];
	vcpu->arch.regs.gpr[10] = svcpu->gpr[10];
	vcpu->arch.regs.gpr[11] = svcpu->gpr[11];
	vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
	vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
	vcpu->arch.regs.ccr  = svcpu->cr;
	vcpu->arch.regs.xer = svcpu->xer;
	vcpu->arch.regs.ctr = svcpu->ctr;
	vcpu->arch.regs.link  = svcpu->lr;
	vcpu->arch.regs.nip  = svcpu->pc;
	vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
	vcpu->arch.fault_dar   = svcpu->fault_dar;
	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
	vcpu->arch.last_inst   = svcpu->last_inst;
#ifdef CONFIG_PPC_BOOK3S_64
	vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
#endif
	/*
	 * Update purr and spurr using time base on exit.
	 */
	vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
	vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
	to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb;
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Unlike other MSR bits, the MSR[TS] bits can be changed by the
	 * guest without notifying the host: they are modified by
	 * unprivileged instructions such as "tbegin"/"tend"/"tresume"/
	 * "tsuspend" in a PR KVM guest.
	 *
	 * It is necessary to sync them here to calculate a correct
	 * shadow_msr.
	 *
	 * A privileged guest's tbegin always fails at present, so we only
	 * need to take care of a problem-state guest.
	 */
	old_msr = kvmppc_get_msr(vcpu);
	if (unlikely((old_msr & MSR_PR) &&
		(vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) !=
				(old_msr & (MSR_TS_MASK)))) {
		old_msr &= ~(MSR_TS_MASK);
		old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK));
		kvmppc_set_msr_fast(vcpu, old_msr);
		kvmppc_recalc_shadow_msr(vcpu);
	}
#endif

	svcpu->in_use = false;

out:
	svcpu_put(svcpu);
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu)
{
	tm_enable();
	vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
	vcpu->arch.texasr = mfspr(SPRN_TEXASR);
	vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
	tm_disable();
}

void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu)
{
	tm_enable();
	mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
	mtspr(SPRN_TEXASR, vcpu->arch.texasr);
	mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
	tm_disable();
}

/* Load up the math (FP/VEC/VSX) facilities that are enabled in the
 * guest MSR but not yet enabled in hardware.
 */
static void kvmppc_handle_lost_math_exts(struct kvm_vcpu *vcpu)
{
	ulong exit_nr;
	ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) &
		(MSR_FP | MSR_VEC | MSR_VSX);

	if (!ext_diff)
		return;

	if (ext_diff == MSR_FP)
		exit_nr = BOOK3S_INTERRUPT_FP_UNAVAIL;
	else if (ext_diff == MSR_VEC)
		exit_nr = BOOK3S_INTERRUPT_ALTIVEC;
	else
		exit_nr = BOOK3S_INTERRUPT_VSX;

	kvmppc_handle_ext(vcpu, exit_nr, ext_diff);
}

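/*
 * If the vcpu is not in an active (transactional or suspended)
 * transaction, only the TM SPRs need to be saved or restored;
 * otherwise the full checkpointed state has to be switched as well.
 */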
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu)
{
	if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) {
		kvmppc_save_tm_sprs(vcpu);
		return;
	}

	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	kvmppc_giveup_ext(vcpu, MSR_VSX);

	preempt_disable();
	_kvmppc_save_tm_pr(vcpu, mfmsr());
	preempt_enable();
}

void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu)
{
	if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) {
		kvmppc_restore_tm_sprs(vcpu);
		if (kvmppc_get_msr(vcpu) & MSR_TM) {
			kvmppc_handle_lost_math_exts(vcpu);
			if (vcpu->arch.fscr & FSCR_TAR)
				kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
		}
		return;
	}

	preempt_disable();
	_kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu));
	preempt_enable();

	if (kvmppc_get_msr(vcpu) & MSR_TM) {
		kvmppc_handle_lost_math_exts(vcpu);
		if (vcpu->arch.fscr & FSCR_TAR)
			kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
	}
}
#endif

static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	/* We misuse TLB_FLUSH to indicate that we want to clear
	   all shadow cache entries */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return r;
}

/************* MMU Notifiers *************/
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
			     unsigned long end)
{
	long i;
	struct kvm_vcpu *vcpu;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
					      gfn_end << PAGE_SHIFT);
	}
}

static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
				  unsigned long end)
{
	do_kvm_unmap_hva(kvm, start, end);

	return 0;
}

static int kvm_age_hva_pr(struct kvm *kvm, unsigned long start,
			  unsigned long end)
{
	/* XXX could be more clever ;) */
	return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/

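/*
 * Guest MSR updates: recompute the shadow MSR, emulate MSR_POW by
 * blocking the vcpu, and flush/remap segments whenever the
 * translation mode (PR/IR/DR) changes.
 */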
static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr;

	/* For PAPR guest, make sure MSR reflects guest mode */
	if (vcpu->arch.papr_enabled)
		msr = (msr & ~MSR_HV) | MSR_ME;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* We should never target the guest MSR at TS=10 && PR=0,
	 * since we always fail transactions in guest privileged
	 * state.
	 */
	if (!(msr & MSR_PR) && MSR_TM_TRANSACTIONAL(msr))
		kvmppc_emulate_tabort(vcpu,
			TM_CAUSE_KVM_FAC_UNAV | TM_CAUSE_PERSISTENT);
#endif

	old_msr = kvmppc_get_msr(vcpu);
	msr &= to_book3s(vcpu)->msr_mask;
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			kvmppc_set_msr_fast(vcpu, msr);
		}
	}

	if (kvmppc_is_split_real(vcpu))
		kvmppc_fixup_split_real(vcpu);
	else
		kvmppc_unfixup_split_real(vcpu);

	if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/*
	 * When switching from 32 to 64-bit, we may have a stale 32-bit
	 * magic page around, we need to flush it. Typically the 32-bit
	 * magic page will be instantiated when calling into RTAS. Note:
	 * We assume that such a transition only happens while in kernel
	 * mode, ie, we never transition from user 32-bit to kernel 64-bit
	 * with a 32-bit magic page around.
	 */
	if (vcpu->arch.magic_page_pa &&
	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
		/* going from RTAS to normal kernel code */
		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
				     ~0xFFFUL);
	}

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (kvmppc_get_msr(vcpu) & MSR_TM)
		kvmppc_handle_lost_math_exts(vcpu);
#endif
}

static void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as a 32-byte store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

	/*
	 * If they're asking for POWER6 or later, set the flag
	 * indicating that we can do multiple large page sizes
	 * and 1TB segments.
	 * Also set the flag that indicates that tlbie has the large
	 * page bit in the RB operand instead of the instruction.
	 */
	switch (PVR_VER(pvr)) {
	case PVR_POWER6:
	case PVR_POWER7:
	case PVR_POWER7p:
	case PVR_POWER8:
	case PVR_POWER8E:
	case PVR_POWER8NVL:
	case PVR_POWER9:
		vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
			BOOK3S_HFLAG_NEW_TLBIE;
		break;
	}

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32 bit Book3S always has 32 byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ( "mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}

/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap
 * dcbz to emulate a 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage);

	/* patch dcbz into reserved instruction, so we trap */
	for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ)
			page[i] &= cpu_to_be32(0xfffffff7);

	kunmap_atomic(page);
	put_page(hpage);
}

static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) {
		return true;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
}

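/*
 * Handle a guest page fault: translate the effective address through
 * the guest MMU when relocation is on, then either reflect the fault
 * back into the guest, map the page into the host shadow MMU, or
 * fall back to MMIO emulation.
 */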
static int kvmppc_handle_pagefault(struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	bool iswrite = false;
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte = { 0 };
	bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
	bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;
	if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
		iswrite = true;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
		pte.page_size = MMU_PAGE_64K;
		pte.wimg = HPTE_R_M;
	}

	switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
		if (!data &&
		    (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
		    ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
			pte.raddr &= ~SPLIT_HACK_MASK;
		fallthrough;
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT || page_found == -EPERM) {
		/* Page not found in guest PTE entries, or protection fault */
		u64 flags;

		if (page_found == -EPERM)
			flags = DSISR_PROTFAULT;
		else
			flags = DSISR_NOHPTE;
		if (data) {
			flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE;
			kvmppc_core_queue_data_storage(vcpu, eaddr, flags);
		} else {
			kvmppc_core_queue_inst_storage(vcpu, flags);
		}
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (kvmppc_visible_gpa(vcpu, pte.raddr)) {
		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
			/*
			 * There is already a host HPTE there, presumably
			 * a read-only one for a page the guest thinks
			 * is writable, so get rid of it first.
			 */
			kvmppc_mmu_unmap_page(vcpu, &pte);
		}
		/* The guest's PTE is not mapped yet. Map on the host */
		if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
			/* Exit KVM if mapping failed */
			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(vcpu);
		if (r == RESUME_HOST_NV)
			r = RESUME_HOST;
	}

	return r;
}

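/*
 * FP/Altivec/VSX are switched lazily: vcpu->arch.guest_owned_ext
 * tracks which facilities currently hold guest state in the real
 * registers, so they have to be given back before the host (or
 * another context) may use them.
 */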
/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;

	/*
	 * VSX instructions can access FP and vector registers, so if
	 * we are giving up VSX, make sure we give up FP and VMX as well.
	 */
	if (msr & MSR_VSX)
		msr |= MSR_FP | MSR_VEC;

	msr &= vcpu->arch.guest_owned_ext;
	if (!msr)
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		/*
		 * Note that on CPUs with VSX, giveup_fpu stores
		 * both the traditional FP registers and the added VSX
		 * registers into thread.fp_state.fpr[].
		 */
		if (t->regs->msr & MSR_FP)
			giveup_fpu(current);
		t->fp_save_area = NULL;
	}

#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		t->vr_save_area = NULL;
	}
#endif

	vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
	kvmppc_recalc_shadow_msr(vcpu);
}

/* Give up facility (TAR / EBB / DSCR) */
void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
		/* Facility not available to the guest, ignore giveup request */
		return;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		vcpu->arch.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, current->thread.tar);
		vcpu->arch.shadow_fscr &= ~FSCR_TAR;
		break;
	}
#endif
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(kvmppc_get_msr(vcpu) & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	if (msr == MSR_VSX) {
		/* No VSX?  Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
		if (!cpu_has_feature(CPU_FTR_VSX))
#endif
		{
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}

		/*
		 * We have to load up all the FP and VMX registers before
		 * we can let the guest use VSX instructions.
		 */
		msr = MSR_FP | MSR_VEC | MSR_VSX;
	}

	/* See if we already own all the ext(s) needed */
	msr &= ~vcpu->arch.guest_owned_ext;
	if (!msr)
		return RESUME_GUEST;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		t->fp_save_area = &vcpu->arch.fp;
		preempt_enable();
	}

	if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		disable_kernel_altivec();
		t->vr_save_area = &vcpu->arch.vr;
		preempt_enable();
#endif
	}

	t->regs->msr |= msr;
	vcpu->arch.guest_owned_ext |= msr;
	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
	unsigned long lost_ext;

	lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
	if (!lost_ext)
		return;

	if (lost_ext & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		preempt_enable();
	}
#ifdef CONFIG_ALTIVEC
	if (lost_ext & MSR_VEC) {
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		disable_kernel_altivec();
		preempt_enable();
	}
#endif
	current->thread.regs->msr |= lost_ext;
}

#ifdef CONFIG_PPC_BOOK3S_64

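/*
 * Facility (FSCR) handling: the FSCR_*_LG constants are bit numbers,
 * and the top byte of the FSCR carries the Interrupt Cause field for
 * facility-unavailable interrupts.
 */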
void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
{
	/* Inject the Interrupt Cause field and trigger a guest interrupt */
	vcpu->arch.fscr &= ~(0xffULL << 56);
	vcpu->arch.fscr |= (fac << 56);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
}

static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	enum emulation_result er = EMULATE_FAIL;

	if (!(kvmppc_get_msr(vcpu) & MSR_PR))
		er = kvmppc_emulate_instruction(vcpu);

	if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
		/* Couldn't emulate, trigger interrupt in guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
	}
}

/* Enable facilities (TAR, EBB, DSCR) for the guest */
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	bool guest_fac_enabled;
	BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S));

	/*
	 * Not every facility is enabled by FSCR bits, check whether the
	 * guest has this facility enabled at all.
	 */
	switch (fac) {
	case FSCR_TAR_LG:
	case FSCR_EBB_LG:
		guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac));
		break;
	case FSCR_TM_LG:
		guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;
		break;
	default:
		guest_fac_enabled = false;
		break;
	}

	if (!guest_fac_enabled) {
		/* Facility not enabled by the guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
		return RESUME_GUEST;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		/* TAR switching isn't lazy in Linux yet */
		current->thread.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, vcpu->arch.tar);
		vcpu->arch.shadow_fscr |= FSCR_TAR;
		break;
	default:
		kvmppc_emulate_fac(vcpu, fac);
		break;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Since we disabled MSR_TM in privileged state, an mfspr of a TM
	 * SPR can trigger a TM facility-unavailable interrupt. In that
	 * case the emulation is handled by kvmppc_emulate_fac(), which
	 * eventually invokes kvmppc_emulate_mfspr(). Note that the mfspr
	 * may name a non-volatile register as RT, so the exit path needs
	 * to restore the non-volatile registers to reflect the update.
	 */
	if ((fac == FSCR_TM_LG) && !(kvmppc_get_msr(vcpu) & MSR_PR))
		return RESUME_GUEST_NV;
#endif

	return RESUME_GUEST;
}

void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
{
	if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
		/* TAR got dropped, drop it in shadow too */
		kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	} else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) {
		vcpu->arch.fscr = fscr;
		kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
		return;
	}

	vcpu->arch.fscr = fscr;
}
#endif

static void kvmppc_setup_debug(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		u64 msr = kvmppc_get_msr(vcpu);

		kvmppc_set_msr(vcpu, msr | MSR_SE);
	}
}

static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		u64 msr = kvmppc_get_msr(vcpu);

		kvmppc_set_msr(vcpu, msr & ~MSR_SE);
	}
}

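/*
 * Note on return codes: RESUME_GUEST re-enters the guest and
 * RESUME_HOST returns to userspace; the _NV variants additionally
 * request that the guest's non-volatile registers be reloaded before
 * re-entry.
 */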
static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	enum emulation_result er;
	ulong flags;
	u32 last_inst;
	int emul, r;

	/*
	 * shadow_srr1 only contains valid flags if we came here via a program
	 * exception. The other exceptions (emulation assist, FP unavailable,
	 * etc.) do not provide flags in SRR1, so use an illegal-instruction
	 * exception when injecting a program interrupt into the guest.
	 */
	if (exit_nr == BOOK3S_INTERRUPT_PROGRAM)
		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
	else
		flags = SRR1_PROGILL;

	emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
	if (emul != EMULATE_DONE)
		return RESUME_GUEST;

	if (kvmppc_get_msr(vcpu) & MSR_PR) {
#ifdef EXIT_DEBUG
		pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n",
			kvmppc_get_pc(vcpu), last_inst);
#endif
		if ((last_inst & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) {
			kvmppc_core_queue_program(vcpu, flags);
			return RESUME_GUEST;
		}
	}

	vcpu->stat.emulated_inst_exits++;
	er = kvmppc_emulate_instruction(vcpu);
	switch (er) {
	case EMULATE_DONE:
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_FAIL:
		pr_crit("%s: emulation at %lx failed (%08x)\n",
			__func__, kvmppc_get_pc(vcpu), last_inst);
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST_NV;
		break;
	case EMULATE_EXIT_USER:
		r = RESUME_HOST_NV;
		break;
	default:
		BUG();
	}

	return r;
}

1154*4882a593Smuzhiyun int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
1155*4882a593Smuzhiyun {
1156*4882a593Smuzhiyun 	struct kvm_run *run = vcpu->run;
1157*4882a593Smuzhiyun 	int r = RESUME_HOST;
1158*4882a593Smuzhiyun 	int s;
1159*4882a593Smuzhiyun 
1160*4882a593Smuzhiyun 	vcpu->stat.sum_exits++;
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun 	run->exit_reason = KVM_EXIT_UNKNOWN;
1163*4882a593Smuzhiyun 	run->ready_for_interrupt_injection = 1;
1164*4882a593Smuzhiyun 
1165*4882a593Smuzhiyun 	/* We get here with MSR.EE=1 */
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun 	trace_kvm_exit(exit_nr, vcpu);
1168*4882a593Smuzhiyun 	guest_exit();
1169*4882a593Smuzhiyun 
1170*4882a593Smuzhiyun 	switch (exit_nr) {
1171*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_INST_STORAGE:
1172*4882a593Smuzhiyun 	{
1173*4882a593Smuzhiyun 		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
1174*4882a593Smuzhiyun 		vcpu->stat.pf_instruc++;
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun 		if (kvmppc_is_split_real(vcpu))
1177*4882a593Smuzhiyun 			kvmppc_fixup_split_real(vcpu);
1178*4882a593Smuzhiyun 
1179*4882a593Smuzhiyun #ifdef CONFIG_PPC_BOOK3S_32
1180*4882a593Smuzhiyun 		/* We mark segments as unused when invalidating them, so treat
1181*4882a593Smuzhiyun 		 * the corresponding fault as a segment fault. */
1182*4882a593Smuzhiyun 		{
1183*4882a593Smuzhiyun 			struct kvmppc_book3s_shadow_vcpu *svcpu;
1184*4882a593Smuzhiyun 			u32 sr;
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun 			svcpu = svcpu_get(vcpu);
1187*4882a593Smuzhiyun 			sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
1188*4882a593Smuzhiyun 			svcpu_put(svcpu);
1189*4882a593Smuzhiyun 			if (sr == SR_INVALID) {
1190*4882a593Smuzhiyun 				kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
1191*4882a593Smuzhiyun 				r = RESUME_GUEST;
1192*4882a593Smuzhiyun 				break;
1193*4882a593Smuzhiyun 			}
1194*4882a593Smuzhiyun 		}
1195*4882a593Smuzhiyun #endif
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun 		/* only care about PTEG not found errors, but leave NX alone */
1198*4882a593Smuzhiyun 		if (shadow_srr1 & 0x40000000) {
1199*4882a593Smuzhiyun 			int idx = srcu_read_lock(&vcpu->kvm->srcu);
1200*4882a593Smuzhiyun 			r = kvmppc_handle_pagefault(vcpu, kvmppc_get_pc(vcpu), exit_nr);
1201*4882a593Smuzhiyun 			srcu_read_unlock(&vcpu->kvm->srcu, idx);
1202*4882a593Smuzhiyun 			vcpu->stat.sp_instruc++;
1203*4882a593Smuzhiyun 		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
1204*4882a593Smuzhiyun 			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
1205*4882a593Smuzhiyun 			/*
1206*4882a593Smuzhiyun 			 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
1207*4882a593Smuzhiyun 			 *     so we can't use the NX bit inside the guest. Let's cross our
1208*4882a593Smuzhiyun 			 *     fingers that no guest that needs the dcbz hack does NX.
1209*4882a593Smuzhiyun 			 */
1210*4882a593Smuzhiyun 			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
1211*4882a593Smuzhiyun 			r = RESUME_GUEST;
1212*4882a593Smuzhiyun 		} else {
1213*4882a593Smuzhiyun 			kvmppc_core_queue_inst_storage(vcpu,
1214*4882a593Smuzhiyun 						shadow_srr1 & 0x58000000);
1215*4882a593Smuzhiyun 			r = RESUME_GUEST;
1216*4882a593Smuzhiyun 		}
1217*4882a593Smuzhiyun 		break;
1218*4882a593Smuzhiyun 	}
1219*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_DATA_STORAGE:
1220*4882a593Smuzhiyun 	{
1221*4882a593Smuzhiyun 		ulong dar = kvmppc_get_fault_dar(vcpu);
1222*4882a593Smuzhiyun 		u32 fault_dsisr = vcpu->arch.fault_dsisr;
1223*4882a593Smuzhiyun 		vcpu->stat.pf_storage++;
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun #ifdef CONFIG_PPC_BOOK3S_32
1226*4882a593Smuzhiyun 		/* We mark segments as unused when invalidating them, so treat
1227*4882a593Smuzhiyun 		 * the corresponding fault as a segment fault. */
1228*4882a593Smuzhiyun 		{
1229*4882a593Smuzhiyun 			struct kvmppc_book3s_shadow_vcpu *svcpu;
1230*4882a593Smuzhiyun 			u32 sr;
1231*4882a593Smuzhiyun 
1232*4882a593Smuzhiyun 			svcpu = svcpu_get(vcpu);
1233*4882a593Smuzhiyun 			sr = svcpu->sr[dar >> SID_SHIFT];
1234*4882a593Smuzhiyun 			svcpu_put(svcpu);
1235*4882a593Smuzhiyun 			if (sr == SR_INVALID) {
1236*4882a593Smuzhiyun 				kvmppc_mmu_map_segment(vcpu, dar);
1237*4882a593Smuzhiyun 				r = RESUME_GUEST;
1238*4882a593Smuzhiyun 				break;
1239*4882a593Smuzhiyun 			}
1240*4882a593Smuzhiyun 		}
1241*4882a593Smuzhiyun #endif
1242*4882a593Smuzhiyun 
1243*4882a593Smuzhiyun 		/*
1244*4882a593Smuzhiyun 		 * We need to handle missing shadow PTEs, and
1245*4882a593Smuzhiyun 		 * protection faults due to us mapping a page read-only
1246*4882a593Smuzhiyun 		 * when the guest thinks it is writable.
1247*4882a593Smuzhiyun 		 */
1248*4882a593Smuzhiyun 		if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
1249*4882a593Smuzhiyun 			int idx = srcu_read_lock(&vcpu->kvm->srcu);
1250*4882a593Smuzhiyun 			r = kvmppc_handle_pagefault(vcpu, dar, exit_nr);
1251*4882a593Smuzhiyun 			srcu_read_unlock(&vcpu->kvm->srcu, idx);
1252*4882a593Smuzhiyun 		} else {
1253*4882a593Smuzhiyun 			kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr);
1254*4882a593Smuzhiyun 			r = RESUME_GUEST;
1255*4882a593Smuzhiyun 		}
1256*4882a593Smuzhiyun 		break;
1257*4882a593Smuzhiyun 	}
1258*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_DATA_SEGMENT:
1259*4882a593Smuzhiyun 		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
1260*4882a593Smuzhiyun 			kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
1261*4882a593Smuzhiyun 			kvmppc_book3s_queue_irqprio(vcpu,
1262*4882a593Smuzhiyun 				BOOK3S_INTERRUPT_DATA_SEGMENT);
1263*4882a593Smuzhiyun 		}
1264*4882a593Smuzhiyun 		r = RESUME_GUEST;
1265*4882a593Smuzhiyun 		break;
1266*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_INST_SEGMENT:
1267*4882a593Smuzhiyun 		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
1268*4882a593Smuzhiyun 			kvmppc_book3s_queue_irqprio(vcpu,
1269*4882a593Smuzhiyun 				BOOK3S_INTERRUPT_INST_SEGMENT);
1270*4882a593Smuzhiyun 		}
1271*4882a593Smuzhiyun 		r = RESUME_GUEST;
1272*4882a593Smuzhiyun 		break;
1273*4882a593Smuzhiyun 	/* We're good on these - the host merely wanted to get our attention */
1274*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_DECREMENTER:
1275*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_HV_DECREMENTER:
1276*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_DOORBELL:
1277*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_H_DOORBELL:
1278*4882a593Smuzhiyun 		vcpu->stat.dec_exits++;
1279*4882a593Smuzhiyun 		r = RESUME_GUEST;
1280*4882a593Smuzhiyun 		break;
1281*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_EXTERNAL:
1282*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_EXTERNAL_HV:
1283*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_H_VIRT:
1284*4882a593Smuzhiyun 		vcpu->stat.ext_intr_exits++;
1285*4882a593Smuzhiyun 		r = RESUME_GUEST;
1286*4882a593Smuzhiyun 		break;
1287*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_HMI:
1288*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_PERFMON:
1289*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_SYSTEM_RESET:
1290*4882a593Smuzhiyun 		r = RESUME_GUEST;
1291*4882a593Smuzhiyun 		break;
1292*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_PROGRAM:
1293*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
1294*4882a593Smuzhiyun 		r = kvmppc_exit_pr_progint(vcpu, exit_nr);
1295*4882a593Smuzhiyun 		break;
1296*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_SYSCALL:
1297*4882a593Smuzhiyun 	{
1298*4882a593Smuzhiyun 		u32 last_sc;
1299*4882a593Smuzhiyun 		int emul;
1300*4882a593Smuzhiyun 
1301*4882a593Smuzhiyun 		/* Get last sc for papr */
1302*4882a593Smuzhiyun 		if (vcpu->arch.papr_enabled) {
1303*4882a593Smuzhiyun 			/* The sc instruction points SRR0 to the next inst */
1304*4882a593Smuzhiyun 			emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
1305*4882a593Smuzhiyun 			if (emul != EMULATE_DONE) {
1306*4882a593Smuzhiyun 				kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
1307*4882a593Smuzhiyun 				r = RESUME_GUEST;
1308*4882a593Smuzhiyun 				break;
1309*4882a593Smuzhiyun 			}
1310*4882a593Smuzhiyun 		}
1311*4882a593Smuzhiyun 
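		/* 0x44000022 is the encoding of "sc 1", the PAPR hcall instruction */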
1312*4882a593Smuzhiyun 		if (vcpu->arch.papr_enabled &&
1313*4882a593Smuzhiyun 		    (last_sc == 0x44000022) &&
1314*4882a593Smuzhiyun 		    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
1315*4882a593Smuzhiyun 			/* SC 1 papr hypercalls */
1316*4882a593Smuzhiyun 			ulong cmd = kvmppc_get_gpr(vcpu, 3);
1317*4882a593Smuzhiyun 			int i;
1318*4882a593Smuzhiyun 
1319*4882a593Smuzhiyun #ifdef CONFIG_PPC_BOOK3S_64
1320*4882a593Smuzhiyun 			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
1321*4882a593Smuzhiyun 				r = RESUME_GUEST;
1322*4882a593Smuzhiyun 				break;
1323*4882a593Smuzhiyun 			}
1324*4882a593Smuzhiyun #endif
1325*4882a593Smuzhiyun 
1326*4882a593Smuzhiyun 			run->papr_hcall.nr = cmd;
1327*4882a593Smuzhiyun 			for (i = 0; i < 9; ++i) {
1328*4882a593Smuzhiyun 				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
1329*4882a593Smuzhiyun 				run->papr_hcall.args[i] = gpr;
1330*4882a593Smuzhiyun 			}
1331*4882a593Smuzhiyun 			run->exit_reason = KVM_EXIT_PAPR_HCALL;
1332*4882a593Smuzhiyun 			vcpu->arch.hcall_needed = 1;
1333*4882a593Smuzhiyun 			r = RESUME_HOST;
1334*4882a593Smuzhiyun 		} else if (vcpu->arch.osi_enabled &&
1335*4882a593Smuzhiyun 		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
1336*4882a593Smuzhiyun 		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
1337*4882a593Smuzhiyun 			/* MOL hypercalls */
1338*4882a593Smuzhiyun 			u64 *gprs = run->osi.gprs;
1339*4882a593Smuzhiyun 			int i;
1340*4882a593Smuzhiyun 
1341*4882a593Smuzhiyun 			run->exit_reason = KVM_EXIT_OSI;
1342*4882a593Smuzhiyun 			for (i = 0; i < 32; i++)
1343*4882a593Smuzhiyun 				gprs[i] = kvmppc_get_gpr(vcpu, i);
1344*4882a593Smuzhiyun 			vcpu->arch.osi_needed = 1;
1345*4882a593Smuzhiyun 			r = RESUME_HOST_NV;
1346*4882a593Smuzhiyun 		} else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
1347*4882a593Smuzhiyun 		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
1348*4882a593Smuzhiyun 			/* KVM PV hypercalls */
1349*4882a593Smuzhiyun 			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
1350*4882a593Smuzhiyun 			r = RESUME_GUEST;
1351*4882a593Smuzhiyun 		} else {
1352*4882a593Smuzhiyun 			/* Guest syscalls */
1353*4882a593Smuzhiyun 			vcpu->stat.syscall_exits++;
1354*4882a593Smuzhiyun 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
1355*4882a593Smuzhiyun 			r = RESUME_GUEST;
1356*4882a593Smuzhiyun 		}
1357*4882a593Smuzhiyun 		break;
1358*4882a593Smuzhiyun 	}
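	/*
	 * Facility-unavailable interrupts: hand the real FP/VMX/VSX unit to
	 * the guest lazily on first use, unless we are emulating paired
	 * singles, in which case the instruction is emulated instead.
	 */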
1359*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_FP_UNAVAIL:
1360*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_ALTIVEC:
1361*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_VSX:
1362*4882a593Smuzhiyun 	{
1363*4882a593Smuzhiyun 		int ext_msr = 0;
1364*4882a593Smuzhiyun 		int emul;
1365*4882a593Smuzhiyun 		u32 last_inst;
1366*4882a593Smuzhiyun 
1367*4882a593Smuzhiyun 		if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
1368*4882a593Smuzhiyun 			/* Do paired single instruction emulation */
1369*4882a593Smuzhiyun 			emul = kvmppc_get_last_inst(vcpu, INST_GENERIC,
1370*4882a593Smuzhiyun 						    &last_inst);
1371*4882a593Smuzhiyun 			if (emul == EMULATE_DONE)
1372*4882a593Smuzhiyun 				r = kvmppc_exit_pr_progint(vcpu, exit_nr);
1373*4882a593Smuzhiyun 			else
1374*4882a593Smuzhiyun 				r = RESUME_GUEST;
1375*4882a593Smuzhiyun 
1376*4882a593Smuzhiyun 			break;
1377*4882a593Smuzhiyun 		}
1378*4882a593Smuzhiyun 
1379*4882a593Smuzhiyun 		/* Enable external provider */
1380*4882a593Smuzhiyun 		switch (exit_nr) {
1381*4882a593Smuzhiyun 		case BOOK3S_INTERRUPT_FP_UNAVAIL:
1382*4882a593Smuzhiyun 			ext_msr = MSR_FP;
1383*4882a593Smuzhiyun 			break;
1384*4882a593Smuzhiyun 
1385*4882a593Smuzhiyun 		case BOOK3S_INTERRUPT_ALTIVEC:
1386*4882a593Smuzhiyun 			ext_msr = MSR_VEC;
1387*4882a593Smuzhiyun 			break;
1388*4882a593Smuzhiyun 
1389*4882a593Smuzhiyun 		case BOOK3S_INTERRUPT_VSX:
1390*4882a593Smuzhiyun 			ext_msr = MSR_VSX;
1391*4882a593Smuzhiyun 			break;
1392*4882a593Smuzhiyun 		}
1393*4882a593Smuzhiyun 
1394*4882a593Smuzhiyun 		r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
1395*4882a593Smuzhiyun 		break;
1396*4882a593Smuzhiyun 	}
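	/*
	 * Alignment interrupts: compute the DSISR/DAR values the guest
	 * expects from the faulting instruction, then reflect the interrupt.
	 */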
1397*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_ALIGNMENT:
1398*4882a593Smuzhiyun 	{
1399*4882a593Smuzhiyun 		u32 last_inst;
1400*4882a593Smuzhiyun 		int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
1401*4882a593Smuzhiyun 
1402*4882a593Smuzhiyun 		if (emul == EMULATE_DONE) {
1403*4882a593Smuzhiyun 			u32 dsisr;
1404*4882a593Smuzhiyun 			u64 dar;
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 			dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
1407*4882a593Smuzhiyun 			dar = kvmppc_alignment_dar(vcpu, last_inst);
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun 			kvmppc_set_dsisr(vcpu, dsisr);
1410*4882a593Smuzhiyun 			kvmppc_set_dar(vcpu, dar);
1411*4882a593Smuzhiyun 
1412*4882a593Smuzhiyun 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
1413*4882a593Smuzhiyun 		}
1414*4882a593Smuzhiyun 		r = RESUME_GUEST;
1415*4882a593Smuzhiyun 		break;
1416*4882a593Smuzhiyun 	}
1417*4882a593Smuzhiyun #ifdef CONFIG_PPC_BOOK3S_64
1418*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_FAC_UNAVAIL:
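		/* The top byte of FSCR holds the interrupt cause, i.e. which
		 * facility the guest tried to use while it was unavailable. */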
1419*4882a593Smuzhiyun 		r = kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
1420*4882a593Smuzhiyun 		break;
1421*4882a593Smuzhiyun #endif
1422*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_MACHINE_CHECK:
1423*4882a593Smuzhiyun 		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
1424*4882a593Smuzhiyun 		r = RESUME_GUEST;
1425*4882a593Smuzhiyun 		break;
1426*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_TRACE:
1427*4882a593Smuzhiyun 		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
1428*4882a593Smuzhiyun 			run->exit_reason = KVM_EXIT_DEBUG;
1429*4882a593Smuzhiyun 			r = RESUME_HOST;
1430*4882a593Smuzhiyun 		} else {
1431*4882a593Smuzhiyun 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
1432*4882a593Smuzhiyun 			r = RESUME_GUEST;
1433*4882a593Smuzhiyun 		}
1434*4882a593Smuzhiyun 		break;
1435*4882a593Smuzhiyun 	default:
1436*4882a593Smuzhiyun 	{
1437*4882a593Smuzhiyun 		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
1438*4882a593Smuzhiyun 		/* Ugh - bork here! What did we get? */
1439*4882a593Smuzhiyun 		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
1440*4882a593Smuzhiyun 			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
1441*4882a593Smuzhiyun 		r = RESUME_HOST;
1442*4882a593Smuzhiyun 		BUG();
1443*4882a593Smuzhiyun 		break;
1444*4882a593Smuzhiyun 	}
1445*4882a593Smuzhiyun 	}
1446*4882a593Smuzhiyun 
1447*4882a593Smuzhiyun 	if (!(r & RESUME_HOST)) {
1448*4882a593Smuzhiyun 		/* To avoid clobbering exit_reason, only check for signals if
1449*4882a593Smuzhiyun 		 * we aren't already exiting to userspace for some other
1450*4882a593Smuzhiyun 		 * reason. */
1451*4882a593Smuzhiyun 
1452*4882a593Smuzhiyun 		/*
1453*4882a593Smuzhiyun 		 * Interrupts could be timers for the guest which we have to
1454*4882a593Smuzhiyun 		 * inject again, so let's postpone them until we're in the guest
1455*4882a593Smuzhiyun 		 * and if we really did time things so badly, then we just exit
1456*4882a593Smuzhiyun 		 * again due to a host external interrupt.
1457*4882a593Smuzhiyun 		 */
1458*4882a593Smuzhiyun 		s = kvmppc_prepare_to_enter(vcpu);
1459*4882a593Smuzhiyun 		if (s <= 0)
1460*4882a593Smuzhiyun 			r = s;
1461*4882a593Smuzhiyun 		else {
1462*4882a593Smuzhiyun 			/* interrupts now hard-disabled */
1463*4882a593Smuzhiyun 			kvmppc_fix_ee_before_entry();
1464*4882a593Smuzhiyun 		}
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun 		kvmppc_handle_lost_ext(vcpu);
1467*4882a593Smuzhiyun 	}
1468*4882a593Smuzhiyun 
1469*4882a593Smuzhiyun 	trace_kvm_book3s_reenter(r, vcpu);
1470*4882a593Smuzhiyun 
1471*4882a593Smuzhiyun 	return r;
1472*4882a593Smuzhiyun }
1473*4882a593Smuzhiyun 
1474*4882a593Smuzhiyun static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
1475*4882a593Smuzhiyun 					    struct kvm_sregs *sregs)
1476*4882a593Smuzhiyun {
1477*4882a593Smuzhiyun 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
1478*4882a593Smuzhiyun 	int i;
1479*4882a593Smuzhiyun 
1480*4882a593Smuzhiyun 	sregs->pvr = vcpu->arch.pvr;
1481*4882a593Smuzhiyun 
1482*4882a593Smuzhiyun 	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
1483*4882a593Smuzhiyun 	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
1484*4882a593Smuzhiyun 		for (i = 0; i < 64; i++) {
1485*4882a593Smuzhiyun 			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
1486*4882a593Smuzhiyun 			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
1487*4882a593Smuzhiyun 		}
1488*4882a593Smuzhiyun 	} else {
1489*4882a593Smuzhiyun 		for (i = 0; i < 16; i++)
1490*4882a593Smuzhiyun 			sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);
1491*4882a593Smuzhiyun 
1492*4882a593Smuzhiyun 		for (i = 0; i < 8; i++) {
1493*4882a593Smuzhiyun 			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
1494*4882a593Smuzhiyun 			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
1495*4882a593Smuzhiyun 		}
1496*4882a593Smuzhiyun 	}
1497*4882a593Smuzhiyun 
1498*4882a593Smuzhiyun 	return 0;
1499*4882a593Smuzhiyun }
1500*4882a593Smuzhiyun 
1501*4882a593Smuzhiyun static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
1502*4882a593Smuzhiyun 					    struct kvm_sregs *sregs)
1503*4882a593Smuzhiyun {
1504*4882a593Smuzhiyun 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
1505*4882a593Smuzhiyun 	int i;
1506*4882a593Smuzhiyun 
1507*4882a593Smuzhiyun 	kvmppc_set_pvr_pr(vcpu, sregs->pvr);
1508*4882a593Smuzhiyun 
1509*4882a593Smuzhiyun 	vcpu3s->sdr1 = sregs->u.s.sdr1;
1510*4882a593Smuzhiyun #ifdef CONFIG_PPC_BOOK3S_64
1511*4882a593Smuzhiyun 	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
1512*4882a593Smuzhiyun 		/* Flush all SLB entries */
1513*4882a593Smuzhiyun 		vcpu->arch.mmu.slbmte(vcpu, 0, 0);
1514*4882a593Smuzhiyun 		vcpu->arch.mmu.slbia(vcpu);
1515*4882a593Smuzhiyun 
1516*4882a593Smuzhiyun 		for (i = 0; i < 64; i++) {
1517*4882a593Smuzhiyun 			u64 rb = sregs->u.s.ppc64.slb[i].slbe;
1518*4882a593Smuzhiyun 			u64 rs = sregs->u.s.ppc64.slb[i].slbv;
1519*4882a593Smuzhiyun 
1520*4882a593Smuzhiyun 			if (rb & SLB_ESID_V)
1521*4882a593Smuzhiyun 				vcpu->arch.mmu.slbmte(vcpu, rs, rb);
1522*4882a593Smuzhiyun 		}
1523*4882a593Smuzhiyun 	} else
1524*4882a593Smuzhiyun #endif
1525*4882a593Smuzhiyun 	{
1526*4882a593Smuzhiyun 		for (i = 0; i < 16; i++) {
1527*4882a593Smuzhiyun 			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
1528*4882a593Smuzhiyun 		}
1529*4882a593Smuzhiyun 		for (i = 0; i < 8; i++) {
1530*4882a593Smuzhiyun 			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
1531*4882a593Smuzhiyun 				       (u32)sregs->u.s.ppc32.ibat[i]);
1532*4882a593Smuzhiyun 			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
1533*4882a593Smuzhiyun 				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
1534*4882a593Smuzhiyun 			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
1535*4882a593Smuzhiyun 				       (u32)sregs->u.s.ppc32.dbat[i]);
1536*4882a593Smuzhiyun 			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
1537*4882a593Smuzhiyun 				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
1538*4882a593Smuzhiyun 		}
1539*4882a593Smuzhiyun 	}
1540*4882a593Smuzhiyun 
1541*4882a593Smuzhiyun 	/* Flush the MMU after messing with the segments */
1542*4882a593Smuzhiyun 	kvmppc_mmu_pte_flush(vcpu, 0, 0);
1543*4882a593Smuzhiyun 
1544*4882a593Smuzhiyun 	return 0;
1545*4882a593Smuzhiyun }
1546*4882a593Smuzhiyun 
1547*4882a593Smuzhiyun static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
1548*4882a593Smuzhiyun 				 union kvmppc_one_reg *val)
1549*4882a593Smuzhiyun {
1550*4882a593Smuzhiyun 	int r = 0;
1551*4882a593Smuzhiyun 
1552*4882a593Smuzhiyun 	switch (id) {
1553*4882a593Smuzhiyun 	case KVM_REG_PPC_DEBUG_INST:
1554*4882a593Smuzhiyun 		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
1555*4882a593Smuzhiyun 		break;
1556*4882a593Smuzhiyun 	case KVM_REG_PPC_HIOR:
1557*4882a593Smuzhiyun 		*val = get_reg_val(id, to_book3s(vcpu)->hior);
1558*4882a593Smuzhiyun 		break;
1559*4882a593Smuzhiyun 	case KVM_REG_PPC_VTB:
1560*4882a593Smuzhiyun 		*val = get_reg_val(id, to_book3s(vcpu)->vtb);
1561*4882a593Smuzhiyun 		break;
1562*4882a593Smuzhiyun 	case KVM_REG_PPC_LPCR:
1563*4882a593Smuzhiyun 	case KVM_REG_PPC_LPCR_64:
1564*4882a593Smuzhiyun 		/*
1565*4882a593Smuzhiyun 		 * We are only interested in the LPCR_ILE bit
1566*4882a593Smuzhiyun 		 */
1567*4882a593Smuzhiyun 		if (vcpu->arch.intr_msr & MSR_LE)
1568*4882a593Smuzhiyun 			*val = get_reg_val(id, LPCR_ILE);
1569*4882a593Smuzhiyun 		else
1570*4882a593Smuzhiyun 			*val = get_reg_val(id, 0);
1571*4882a593Smuzhiyun 		break;
1572*4882a593Smuzhiyun #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1573*4882a593Smuzhiyun 	case KVM_REG_PPC_TFHAR:
1574*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.tfhar);
1575*4882a593Smuzhiyun 		break;
1576*4882a593Smuzhiyun 	case KVM_REG_PPC_TFIAR:
1577*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.tfiar);
1578*4882a593Smuzhiyun 		break;
1579*4882a593Smuzhiyun 	case KVM_REG_PPC_TEXASR:
1580*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.texasr);
1581*4882a593Smuzhiyun 		break;
1582*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
1583*4882a593Smuzhiyun 		*val = get_reg_val(id,
1584*4882a593Smuzhiyun 				vcpu->arch.gpr_tm[id-KVM_REG_PPC_TM_GPR0]);
1585*4882a593Smuzhiyun 		break;
1586*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
1587*4882a593Smuzhiyun 	{
1588*4882a593Smuzhiyun 		int i, j;
1589*4882a593Smuzhiyun 
1590*4882a593Smuzhiyun 		i = id - KVM_REG_PPC_TM_VSR0;
1591*4882a593Smuzhiyun 		if (i < 32)
1592*4882a593Smuzhiyun 			for (j = 0; j < TS_FPRWIDTH; j++)
1593*4882a593Smuzhiyun 				val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
1594*4882a593Smuzhiyun 		else {
1595*4882a593Smuzhiyun 			if (cpu_has_feature(CPU_FTR_ALTIVEC))
1596*4882a593Smuzhiyun 				val->vval = vcpu->arch.vr_tm.vr[i-32];
1597*4882a593Smuzhiyun 			else
1598*4882a593Smuzhiyun 				r = -ENXIO;
1599*4882a593Smuzhiyun 		}
1600*4882a593Smuzhiyun 		break;
1601*4882a593Smuzhiyun 	}
1602*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_CR:
1603*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.cr_tm);
1604*4882a593Smuzhiyun 		break;
1605*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_XER:
1606*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.xer_tm);
1607*4882a593Smuzhiyun 		break;
1608*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_LR:
1609*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.lr_tm);
1610*4882a593Smuzhiyun 		break;
1611*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_CTR:
1612*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.ctr_tm);
1613*4882a593Smuzhiyun 		break;
1614*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_FPSCR:
1615*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
1616*4882a593Smuzhiyun 		break;
1617*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_AMR:
1618*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.amr_tm);
1619*4882a593Smuzhiyun 		break;
1620*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_PPR:
1621*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.ppr_tm);
1622*4882a593Smuzhiyun 		break;
1623*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_VRSAVE:
1624*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.vrsave_tm);
1625*4882a593Smuzhiyun 		break;
1626*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_VSCR:
1627*4882a593Smuzhiyun 		if (cpu_has_feature(CPU_FTR_ALTIVEC))
1628*4882a593Smuzhiyun 			*val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
1629*4882a593Smuzhiyun 		else
1630*4882a593Smuzhiyun 			r = -ENXIO;
1631*4882a593Smuzhiyun 		break;
1632*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_DSCR:
1633*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.dscr_tm);
1634*4882a593Smuzhiyun 		break;
1635*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_TAR:
1636*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.tar_tm);
1637*4882a593Smuzhiyun 		break;
1638*4882a593Smuzhiyun #endif
1639*4882a593Smuzhiyun 	default:
1640*4882a593Smuzhiyun 		r = -EINVAL;
1641*4882a593Smuzhiyun 		break;
1642*4882a593Smuzhiyun 	}
1643*4882a593Smuzhiyun 
1644*4882a593Smuzhiyun 	return r;
1645*4882a593Smuzhiyun }
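/*
 * These get/set handlers back the KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu
 * ioctls. A minimal userspace sketch (hypothetical fd and variable names):
 *
 *	__u64 hior;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_HIOR,
 *		.addr = (__u64)&hior,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */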
1646*4882a593Smuzhiyun 
1647*4882a593Smuzhiyun static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
1648*4882a593Smuzhiyun {
1649*4882a593Smuzhiyun 	if (new_lpcr & LPCR_ILE)
1650*4882a593Smuzhiyun 		vcpu->arch.intr_msr |= MSR_LE;
1651*4882a593Smuzhiyun 	else
1652*4882a593Smuzhiyun 		vcpu->arch.intr_msr &= ~MSR_LE;
1653*4882a593Smuzhiyun }
1654*4882a593Smuzhiyun 
1655*4882a593Smuzhiyun static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
1656*4882a593Smuzhiyun 				 union kvmppc_one_reg *val)
1657*4882a593Smuzhiyun {
1658*4882a593Smuzhiyun 	int r = 0;
1659*4882a593Smuzhiyun 
1660*4882a593Smuzhiyun 	switch (id) {
1661*4882a593Smuzhiyun 	case KVM_REG_PPC_HIOR:
1662*4882a593Smuzhiyun 		to_book3s(vcpu)->hior = set_reg_val(id, *val);
1663*4882a593Smuzhiyun 		to_book3s(vcpu)->hior_explicit = true;
1664*4882a593Smuzhiyun 		break;
1665*4882a593Smuzhiyun 	case KVM_REG_PPC_VTB:
1666*4882a593Smuzhiyun 		to_book3s(vcpu)->vtb = set_reg_val(id, *val);
1667*4882a593Smuzhiyun 		break;
1668*4882a593Smuzhiyun 	case KVM_REG_PPC_LPCR:
1669*4882a593Smuzhiyun 	case KVM_REG_PPC_LPCR_64:
1670*4882a593Smuzhiyun 		kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
1671*4882a593Smuzhiyun 		break;
1672*4882a593Smuzhiyun #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1673*4882a593Smuzhiyun 	case KVM_REG_PPC_TFHAR:
1674*4882a593Smuzhiyun 		vcpu->arch.tfhar = set_reg_val(id, *val);
1675*4882a593Smuzhiyun 		break;
1676*4882a593Smuzhiyun 	case KVM_REG_PPC_TFIAR:
1677*4882a593Smuzhiyun 		vcpu->arch.tfiar = set_reg_val(id, *val);
1678*4882a593Smuzhiyun 		break;
1679*4882a593Smuzhiyun 	case KVM_REG_PPC_TEXASR:
1680*4882a593Smuzhiyun 		vcpu->arch.texasr = set_reg_val(id, *val);
1681*4882a593Smuzhiyun 		break;
1682*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
1683*4882a593Smuzhiyun 		vcpu->arch.gpr_tm[id - KVM_REG_PPC_TM_GPR0] =
1684*4882a593Smuzhiyun 			set_reg_val(id, *val);
1685*4882a593Smuzhiyun 		break;
1686*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
1687*4882a593Smuzhiyun 	{
1688*4882a593Smuzhiyun 		int i, j;
1689*4882a593Smuzhiyun 
1690*4882a593Smuzhiyun 		i = id - KVM_REG_PPC_TM_VSR0;
1691*4882a593Smuzhiyun 		if (i < 32)
1692*4882a593Smuzhiyun 			for (j = 0; j < TS_FPRWIDTH; j++)
1693*4882a593Smuzhiyun 				vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
1694*4882a593Smuzhiyun 		else
1695*4882a593Smuzhiyun 			if (cpu_has_feature(CPU_FTR_ALTIVEC))
1696*4882a593Smuzhiyun 				vcpu->arch.vr_tm.vr[i-32] = val->vval;
1697*4882a593Smuzhiyun 			else
1698*4882a593Smuzhiyun 				r = -ENXIO;
1699*4882a593Smuzhiyun 		break;
1700*4882a593Smuzhiyun 	}
1701*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_CR:
1702*4882a593Smuzhiyun 		vcpu->arch.cr_tm = set_reg_val(id, *val);
1703*4882a593Smuzhiyun 		break;
1704*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_XER:
1705*4882a593Smuzhiyun 		vcpu->arch.xer_tm = set_reg_val(id, *val);
1706*4882a593Smuzhiyun 		break;
1707*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_LR:
1708*4882a593Smuzhiyun 		vcpu->arch.lr_tm = set_reg_val(id, *val);
1709*4882a593Smuzhiyun 		break;
1710*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_CTR:
1711*4882a593Smuzhiyun 		vcpu->arch.ctr_tm = set_reg_val(id, *val);
1712*4882a593Smuzhiyun 		break;
1713*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_FPSCR:
1714*4882a593Smuzhiyun 		vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
1715*4882a593Smuzhiyun 		break;
1716*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_AMR:
1717*4882a593Smuzhiyun 		vcpu->arch.amr_tm = set_reg_val(id, *val);
1718*4882a593Smuzhiyun 		break;
1719*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_PPR:
1720*4882a593Smuzhiyun 		vcpu->arch.ppr_tm = set_reg_val(id, *val);
1721*4882a593Smuzhiyun 		break;
1722*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_VRSAVE:
1723*4882a593Smuzhiyun 		vcpu->arch.vrsave_tm = set_reg_val(id, *val);
1724*4882a593Smuzhiyun 		break;
1725*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_VSCR:
1726*4882a593Smuzhiyun 		if (cpu_has_feature(CPU_FTR_ALTIVEC))
1727*4882a593Smuzhiyun 			vcpu->arch.vr_tm.vscr.u[3] = set_reg_val(id, *val);
1728*4882a593Smuzhiyun 		else
1729*4882a593Smuzhiyun 			r = -ENXIO;
1730*4882a593Smuzhiyun 		break;
1731*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_DSCR:
1732*4882a593Smuzhiyun 		vcpu->arch.dscr_tm = set_reg_val(id, *val);
1733*4882a593Smuzhiyun 		break;
1734*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_TAR:
1735*4882a593Smuzhiyun 		vcpu->arch.tar_tm = set_reg_val(id, *val);
1736*4882a593Smuzhiyun 		break;
1737*4882a593Smuzhiyun #endif
1738*4882a593Smuzhiyun 	default:
1739*4882a593Smuzhiyun 		r = -EINVAL;
1740*4882a593Smuzhiyun 		break;
1741*4882a593Smuzhiyun 	}
1742*4882a593Smuzhiyun 
1743*4882a593Smuzhiyun 	return r;
1744*4882a593Smuzhiyun }
1745*4882a593Smuzhiyun 
1746*4882a593Smuzhiyun static int kvmppc_core_vcpu_create_pr(struct kvm_vcpu *vcpu)
1747*4882a593Smuzhiyun {
1748*4882a593Smuzhiyun 	struct kvmppc_vcpu_book3s *vcpu_book3s;
1749*4882a593Smuzhiyun 	unsigned long p;
1750*4882a593Smuzhiyun 	int err;
1751*4882a593Smuzhiyun 
1752*4882a593Smuzhiyun 	err = -ENOMEM;
1753*4882a593Smuzhiyun 
1754*4882a593Smuzhiyun 	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
1755*4882a593Smuzhiyun 	if (!vcpu_book3s)
1756*4882a593Smuzhiyun 		goto out;
1757*4882a593Smuzhiyun 	vcpu->arch.book3s = vcpu_book3s;
1758*4882a593Smuzhiyun 
1759*4882a593Smuzhiyun #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
1760*4882a593Smuzhiyun 	vcpu->arch.shadow_vcpu =
1761*4882a593Smuzhiyun 		kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
1762*4882a593Smuzhiyun 	if (!vcpu->arch.shadow_vcpu)
1763*4882a593Smuzhiyun 		goto free_vcpu3s;
1764*4882a593Smuzhiyun #endif
1765*4882a593Smuzhiyun 
1766*4882a593Smuzhiyun 	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
1767*4882a593Smuzhiyun 	if (!p)
1768*4882a593Smuzhiyun 		goto free_shadow_vcpu;
1769*4882a593Smuzhiyun 	vcpu->arch.shared = (void *)p;
1770*4882a593Smuzhiyun #ifdef CONFIG_PPC_BOOK3S_64
1771*4882a593Smuzhiyun 	/* Always start the shared struct in native endian mode */
1772*4882a593Smuzhiyun #ifdef __BIG_ENDIAN__
1773*4882a593Smuzhiyun 	vcpu->arch.shared_big_endian = true;
1774*4882a593Smuzhiyun #else
1775*4882a593Smuzhiyun 	vcpu->arch.shared_big_endian = false;
1776*4882a593Smuzhiyun #endif
1777*4882a593Smuzhiyun 
1778*4882a593Smuzhiyun 	/*
1779*4882a593Smuzhiyun 	 * Default to the same as the host if we're on sufficiently
1780*4882a593Smuzhiyun 	 * recent machine that we have 1TB segments;
1781*4882a593Smuzhiyun 	 * otherwise default to PPC970FX.
1782*4882a593Smuzhiyun 	 */
1783*4882a593Smuzhiyun 	vcpu->arch.pvr = 0x3C0301;
1784*4882a593Smuzhiyun 	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1785*4882a593Smuzhiyun 		vcpu->arch.pvr = mfspr(SPRN_PVR);
1786*4882a593Smuzhiyun 	vcpu->arch.intr_msr = MSR_SF;
1787*4882a593Smuzhiyun #else
1788*4882a593Smuzhiyun 	/* default to book3s_32 (750) */
1789*4882a593Smuzhiyun 	vcpu->arch.pvr = 0x84202;
1790*4882a593Smuzhiyun 	vcpu->arch.intr_msr = 0;
1791*4882a593Smuzhiyun #endif
1792*4882a593Smuzhiyun 	kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
1793*4882a593Smuzhiyun 	vcpu->arch.slb_nr = 64;
1794*4882a593Smuzhiyun 
1795*4882a593Smuzhiyun 	vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;
1796*4882a593Smuzhiyun 
1797*4882a593Smuzhiyun 	err = kvmppc_mmu_init_pr(vcpu);
1798*4882a593Smuzhiyun 	if (err < 0)
1799*4882a593Smuzhiyun 		goto free_shared_page;
1800*4882a593Smuzhiyun 
1801*4882a593Smuzhiyun 	return 0;
1802*4882a593Smuzhiyun 
1803*4882a593Smuzhiyun free_shared_page:
1804*4882a593Smuzhiyun 	free_page((unsigned long)vcpu->arch.shared);
1805*4882a593Smuzhiyun free_shadow_vcpu:
1806*4882a593Smuzhiyun #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
1807*4882a593Smuzhiyun 	kfree(vcpu->arch.shadow_vcpu);
1808*4882a593Smuzhiyun free_vcpu3s:
1809*4882a593Smuzhiyun #endif
1810*4882a593Smuzhiyun 	vfree(vcpu_book3s);
1811*4882a593Smuzhiyun out:
1812*4882a593Smuzhiyun 	return err;
1813*4882a593Smuzhiyun }
1814*4882a593Smuzhiyun 
1815*4882a593Smuzhiyun static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
1816*4882a593Smuzhiyun {
1817*4882a593Smuzhiyun 	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
1818*4882a593Smuzhiyun 
1819*4882a593Smuzhiyun 	kvmppc_mmu_destroy_pr(vcpu);
1820*4882a593Smuzhiyun 	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
1821*4882a593Smuzhiyun #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
1822*4882a593Smuzhiyun 	kfree(vcpu->arch.shadow_vcpu);
1823*4882a593Smuzhiyun #endif
1824*4882a593Smuzhiyun 	vfree(vcpu_book3s);
1825*4882a593Smuzhiyun }
1826*4882a593Smuzhiyun 
1827*4882a593Smuzhiyun static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
1828*4882a593Smuzhiyun {
1829*4882a593Smuzhiyun 	int ret;
1830*4882a593Smuzhiyun 
1831*4882a593Smuzhiyun 	/* Check if we can run the vcpu at all */
1832*4882a593Smuzhiyun 	if (!vcpu->arch.sane) {
1833*4882a593Smuzhiyun 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1834*4882a593Smuzhiyun 		ret = -EINVAL;
1835*4882a593Smuzhiyun 		goto out;
1836*4882a593Smuzhiyun 	}
1837*4882a593Smuzhiyun 
1838*4882a593Smuzhiyun 	kvmppc_setup_debug(vcpu);
1839*4882a593Smuzhiyun 
1840*4882a593Smuzhiyun 	/*
1841*4882a593Smuzhiyun 	 * Interrupts could be timers for the guest which we have to inject
1842*4882a593Smuzhiyun 	 * again, so let's postpone them until we're in the guest and if we
1843*4882a593Smuzhiyun 	 * really did time things so badly, then we just exit again due to
1844*4882a593Smuzhiyun 	 * a host external interrupt.
1845*4882a593Smuzhiyun 	 */
1846*4882a593Smuzhiyun 	ret = kvmppc_prepare_to_enter(vcpu);
1847*4882a593Smuzhiyun 	if (ret <= 0)
1848*4882a593Smuzhiyun 		goto out;
1849*4882a593Smuzhiyun 	/* interrupts now hard-disabled */
1850*4882a593Smuzhiyun 
1851*4882a593Smuzhiyun 	/* Save FPU, Altivec and VSX state */
1852*4882a593Smuzhiyun 	giveup_all(current);
1853*4882a593Smuzhiyun 
1854*4882a593Smuzhiyun 	/* Preload FPU if it's enabled */
1855*4882a593Smuzhiyun 	if (kvmppc_get_msr(vcpu) & MSR_FP)
1856*4882a593Smuzhiyun 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
1857*4882a593Smuzhiyun 
1858*4882a593Smuzhiyun 	kvmppc_fix_ee_before_entry();
1859*4882a593Smuzhiyun 
1860*4882a593Smuzhiyun 	ret = __kvmppc_vcpu_run(vcpu);
1861*4882a593Smuzhiyun 
1862*4882a593Smuzhiyun 	kvmppc_clear_debug(vcpu);
1863*4882a593Smuzhiyun 
1864*4882a593Smuzhiyun 	/* No need for guest_exit. It's done in handle_exit.
1865*4882a593Smuzhiyun 	   We also get here with interrupts enabled. */
1866*4882a593Smuzhiyun 
1867*4882a593Smuzhiyun 	/* Make sure we save the guest FPU/Altivec/VSX state */
1868*4882a593Smuzhiyun 	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
1869*4882a593Smuzhiyun 
1870*4882a593Smuzhiyun 	/* Make sure we save the guest TAR/EBB/DSCR state */
1871*4882a593Smuzhiyun 	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
1872*4882a593Smuzhiyun 
1873*4882a593Smuzhiyun out:
1874*4882a593Smuzhiyun 	vcpu->mode = OUTSIDE_GUEST_MODE;
1875*4882a593Smuzhiyun 	return ret;
1876*4882a593Smuzhiyun }
1877*4882a593Smuzhiyun 
1878*4882a593Smuzhiyun /*
1879*4882a593Smuzhiyun  * Get (and clear) the dirty memory log for a memory slot.
1880*4882a593Smuzhiyun  */
1881*4882a593Smuzhiyun static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
1882*4882a593Smuzhiyun 					 struct kvm_dirty_log *log)
1883*4882a593Smuzhiyun {
1884*4882a593Smuzhiyun 	struct kvm_memory_slot *memslot;
1885*4882a593Smuzhiyun 	struct kvm_vcpu *vcpu;
1886*4882a593Smuzhiyun 	ulong ga, ga_end;
1887*4882a593Smuzhiyun 	int is_dirty = 0;
1888*4882a593Smuzhiyun 	int r;
1889*4882a593Smuzhiyun 	unsigned long n;
1890*4882a593Smuzhiyun 
1891*4882a593Smuzhiyun 	mutex_lock(&kvm->slots_lock);
1892*4882a593Smuzhiyun 
1893*4882a593Smuzhiyun 	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
1894*4882a593Smuzhiyun 	if (r)
1895*4882a593Smuzhiyun 		goto out;
1896*4882a593Smuzhiyun 
1897*4882a593Smuzhiyun 	/* If nothing is dirty, don't bother messing with page tables. */
1898*4882a593Smuzhiyun 	if (is_dirty) {
1899*4882a593Smuzhiyun 		ga = memslot->base_gfn << PAGE_SHIFT;
1900*4882a593Smuzhiyun 		ga_end = ga + (memslot->npages << PAGE_SHIFT);
1901*4882a593Smuzhiyun 
1902*4882a593Smuzhiyun 		kvm_for_each_vcpu(n, vcpu, kvm)
1903*4882a593Smuzhiyun 			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
1904*4882a593Smuzhiyun 
1905*4882a593Smuzhiyun 		n = kvm_dirty_bitmap_bytes(memslot);
1906*4882a593Smuzhiyun 		memset(memslot->dirty_bitmap, 0, n);
1907*4882a593Smuzhiyun 	}
1908*4882a593Smuzhiyun 
1909*4882a593Smuzhiyun 	r = 0;
1910*4882a593Smuzhiyun out:
1911*4882a593Smuzhiyun 	mutex_unlock(&kvm->slots_lock);
1912*4882a593Smuzhiyun 	return r;
1913*4882a593Smuzhiyun }
1914*4882a593Smuzhiyun 
1915*4882a593Smuzhiyun static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
1916*4882a593Smuzhiyun 					 struct kvm_memory_slot *memslot)
1917*4882a593Smuzhiyun {
1918*4882a593Smuzhiyun 	return;
1919*4882a593Smuzhiyun }
1920*4882a593Smuzhiyun 
1921*4882a593Smuzhiyun static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
1922*4882a593Smuzhiyun 					struct kvm_memory_slot *memslot,
1923*4882a593Smuzhiyun 					const struct kvm_userspace_memory_region *mem,
1924*4882a593Smuzhiyun 					enum kvm_mr_change change)
1925*4882a593Smuzhiyun {
1926*4882a593Smuzhiyun 	return 0;
1927*4882a593Smuzhiyun }
1928*4882a593Smuzhiyun 
1929*4882a593Smuzhiyun static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
1930*4882a593Smuzhiyun 				const struct kvm_userspace_memory_region *mem,
1931*4882a593Smuzhiyun 				const struct kvm_memory_slot *old,
1932*4882a593Smuzhiyun 				const struct kvm_memory_slot *new,
1933*4882a593Smuzhiyun 				enum kvm_mr_change change)
1934*4882a593Smuzhiyun {
1935*4882a593Smuzhiyun 	return;
1936*4882a593Smuzhiyun }
1937*4882a593Smuzhiyun 
1938*4882a593Smuzhiyun static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *slot)
1939*4882a593Smuzhiyun {
1940*4882a593Smuzhiyun 	return;
1941*4882a593Smuzhiyun }
1942*4882a593Smuzhiyun 
1943*4882a593Smuzhiyun #ifdef CONFIG_PPC64
1944*4882a593Smuzhiyun static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
1945*4882a593Smuzhiyun 					 struct kvm_ppc_smmu_info *info)
1946*4882a593Smuzhiyun {
1947*4882a593Smuzhiyun 	long int i;
1948*4882a593Smuzhiyun 	struct kvm_vcpu *vcpu;
1949*4882a593Smuzhiyun 
1950*4882a593Smuzhiyun 	info->flags = 0;
1951*4882a593Smuzhiyun 
1952*4882a593Smuzhiyun 	/* SLB is always 64 entries */
1953*4882a593Smuzhiyun 	info->slb_size = 64;
1954*4882a593Smuzhiyun 
1955*4882a593Smuzhiyun 	/* Standard 4k base page size segment */
1956*4882a593Smuzhiyun 	info->sps[0].page_shift = 12;
1957*4882a593Smuzhiyun 	info->sps[0].slb_enc = 0;
1958*4882a593Smuzhiyun 	info->sps[0].enc[0].page_shift = 12;
1959*4882a593Smuzhiyun 	info->sps[0].enc[0].pte_enc = 0;
1960*4882a593Smuzhiyun 
1961*4882a593Smuzhiyun 	/*
1962*4882a593Smuzhiyun 	 * 64k large page size.
1963*4882a593Smuzhiyun 	 * We only want to put this in if the CPUs we're emulating
1964*4882a593Smuzhiyun 	 * support it, but unfortunately we don't have a vcpu easily
1965*4882a593Smuzhiyun 	 * to hand here to test.  Just pick the first vcpu, and if
1966*4882a593Smuzhiyun 	 * that doesn't exist yet, report the minimum capability,
1967*4882a593Smuzhiyun 	 * i.e., no 64k pages.
1968*4882a593Smuzhiyun 	 * 1T segment support goes along with 64k pages.
1969*4882a593Smuzhiyun 	 */
1970*4882a593Smuzhiyun 	i = 1;
1971*4882a593Smuzhiyun 	vcpu = kvm_get_vcpu(kvm, 0);
1972*4882a593Smuzhiyun 	if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
1973*4882a593Smuzhiyun 		info->flags = KVM_PPC_1T_SEGMENTS;
1974*4882a593Smuzhiyun 		info->sps[i].page_shift = 16;
1975*4882a593Smuzhiyun 		info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
1976*4882a593Smuzhiyun 		info->sps[i].enc[0].page_shift = 16;
1977*4882a593Smuzhiyun 		info->sps[i].enc[0].pte_enc = 1;
1978*4882a593Smuzhiyun 		++i;
1979*4882a593Smuzhiyun 	}
1980*4882a593Smuzhiyun 
1981*4882a593Smuzhiyun 	/* Standard 16M large page size segment */
1982*4882a593Smuzhiyun 	info->sps[i].page_shift = 24;
1983*4882a593Smuzhiyun 	info->sps[i].slb_enc = SLB_VSID_L;
1984*4882a593Smuzhiyun 	info->sps[i].enc[0].page_shift = 24;
1985*4882a593Smuzhiyun 	info->sps[i].enc[0].pte_enc = 0;
1986*4882a593Smuzhiyun 
1987*4882a593Smuzhiyun 	return 0;
1988*4882a593Smuzhiyun }
1989*4882a593Smuzhiyun 
1990*4882a593Smuzhiyun static int kvm_configure_mmu_pr(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
1991*4882a593Smuzhiyun {
1992*4882a593Smuzhiyun 	if (!cpu_has_feature(CPU_FTR_ARCH_300))
1993*4882a593Smuzhiyun 		return -ENODEV;
1994*4882a593Smuzhiyun 	/* Require flags and process table base and size to all be zero. */
1995*4882a593Smuzhiyun 	if (cfg->flags || cfg->process_table)
1996*4882a593Smuzhiyun 		return -EINVAL;
1997*4882a593Smuzhiyun 	return 0;
1998*4882a593Smuzhiyun }
1999*4882a593Smuzhiyun 
2000*4882a593Smuzhiyun #else
2001*4882a593Smuzhiyun static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
2002*4882a593Smuzhiyun 					 struct kvm_ppc_smmu_info *info)
2003*4882a593Smuzhiyun {
2004*4882a593Smuzhiyun 	/* We should not get called */
2005*4882a593Smuzhiyun 	BUG();
2006*4882a593Smuzhiyun 	return 0;
2007*4882a593Smuzhiyun }
2008*4882a593Smuzhiyun #endif /* CONFIG_PPC64 */
2009*4882a593Smuzhiyun 
2010*4882a593Smuzhiyun static unsigned int kvm_global_user_count;
2011*4882a593Smuzhiyun static DEFINE_SPINLOCK(kvm_global_user_count_lock);
2012*4882a593Smuzhiyun 
2013*4882a593Smuzhiyun static int kvmppc_core_init_vm_pr(struct kvm *kvm)
2014*4882a593Smuzhiyun {
2015*4882a593Smuzhiyun 	mutex_init(&kvm->arch.hpt_mutex);
2016*4882a593Smuzhiyun 
2017*4882a593Smuzhiyun #ifdef CONFIG_PPC_BOOK3S_64
2018*4882a593Smuzhiyun 	/* Start out with the default set of hcalls enabled */
2019*4882a593Smuzhiyun 	kvmppc_pr_init_default_hcalls(kvm);
2020*4882a593Smuzhiyun #endif
2021*4882a593Smuzhiyun 
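	/*
	 * PR KVM intercepts guest exceptions at the classic interrupt
	 * vectors, so "relocation on" exceptions must stay disabled on
	 * pseries hosts while any PR VM exists; kvm_global_user_count
	 * refcounts this across VMs.
	 */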
2022*4882a593Smuzhiyun 	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
2023*4882a593Smuzhiyun 		spin_lock(&kvm_global_user_count_lock);
2024*4882a593Smuzhiyun 		if (++kvm_global_user_count == 1)
2025*4882a593Smuzhiyun 			pseries_disable_reloc_on_exc();
2026*4882a593Smuzhiyun 		spin_unlock(&kvm_global_user_count_lock);
2027*4882a593Smuzhiyun 	}
2028*4882a593Smuzhiyun 	return 0;
2029*4882a593Smuzhiyun }
2030*4882a593Smuzhiyun 
2031*4882a593Smuzhiyun static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
2032*4882a593Smuzhiyun {
2033*4882a593Smuzhiyun #ifdef CONFIG_PPC64
2034*4882a593Smuzhiyun 	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
2035*4882a593Smuzhiyun #endif
2036*4882a593Smuzhiyun 
2037*4882a593Smuzhiyun 	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
2038*4882a593Smuzhiyun 		spin_lock(&kvm_global_user_count_lock);
2039*4882a593Smuzhiyun 		BUG_ON(kvm_global_user_count == 0);
2040*4882a593Smuzhiyun 		if (--kvm_global_user_count == 0)
2041*4882a593Smuzhiyun 			pseries_enable_reloc_on_exc();
2042*4882a593Smuzhiyun 		spin_unlock(&kvm_global_user_count_lock);
2043*4882a593Smuzhiyun 	}
2044*4882a593Smuzhiyun }
2045*4882a593Smuzhiyun 
2046*4882a593Smuzhiyun static int kvmppc_core_check_processor_compat_pr(void)
2047*4882a593Smuzhiyun {
2048*4882a593Smuzhiyun 	/*
2049*4882a593Smuzhiyun 	 * PR KVM can work on POWER9 inside a guest partition
2050*4882a593Smuzhiyun 	 * running in HPT mode.  It can't work if we are using
2051*4882a593Smuzhiyun 	 * radix translation (because radix provides no way for
2052*4882a593Smuzhiyun 	 * a process to have unique translations in quadrant 3).
2053*4882a593Smuzhiyun 	 */
2054*4882a593Smuzhiyun 	if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled())
2055*4882a593Smuzhiyun 		return -EIO;
2056*4882a593Smuzhiyun 	return 0;
2057*4882a593Smuzhiyun }
2058*4882a593Smuzhiyun 
2059*4882a593Smuzhiyun static long kvm_arch_vm_ioctl_pr(struct file *filp,
2060*4882a593Smuzhiyun 				 unsigned int ioctl, unsigned long arg)
2061*4882a593Smuzhiyun {
2062*4882a593Smuzhiyun 	return -ENOTTY;
2063*4882a593Smuzhiyun }
2064*4882a593Smuzhiyun 
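/*
 * The PR backend's ops table, registered with the common book3s code in
 * kvmppc_book3s_init_pr() below.
 */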
2065*4882a593Smuzhiyun static struct kvmppc_ops kvm_ops_pr = {
2066*4882a593Smuzhiyun 	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
2067*4882a593Smuzhiyun 	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
2068*4882a593Smuzhiyun 	.get_one_reg = kvmppc_get_one_reg_pr,
2069*4882a593Smuzhiyun 	.set_one_reg = kvmppc_set_one_reg_pr,
2070*4882a593Smuzhiyun 	.vcpu_load   = kvmppc_core_vcpu_load_pr,
2071*4882a593Smuzhiyun 	.vcpu_put    = kvmppc_core_vcpu_put_pr,
2072*4882a593Smuzhiyun 	.inject_interrupt = kvmppc_inject_interrupt_pr,
2073*4882a593Smuzhiyun 	.set_msr     = kvmppc_set_msr_pr,
2074*4882a593Smuzhiyun 	.vcpu_run    = kvmppc_vcpu_run_pr,
2075*4882a593Smuzhiyun 	.vcpu_create = kvmppc_core_vcpu_create_pr,
2076*4882a593Smuzhiyun 	.vcpu_free   = kvmppc_core_vcpu_free_pr,
2077*4882a593Smuzhiyun 	.check_requests = kvmppc_core_check_requests_pr,
2078*4882a593Smuzhiyun 	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
2079*4882a593Smuzhiyun 	.flush_memslot = kvmppc_core_flush_memslot_pr,
2080*4882a593Smuzhiyun 	.prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
2081*4882a593Smuzhiyun 	.commit_memory_region = kvmppc_core_commit_memory_region_pr,
2082*4882a593Smuzhiyun 	.unmap_hva_range = kvm_unmap_hva_range_pr,
2083*4882a593Smuzhiyun 	.age_hva  = kvm_age_hva_pr,
2084*4882a593Smuzhiyun 	.test_age_hva = kvm_test_age_hva_pr,
2085*4882a593Smuzhiyun 	.set_spte_hva = kvm_set_spte_hva_pr,
2086*4882a593Smuzhiyun 	.free_memslot = kvmppc_core_free_memslot_pr,
2087*4882a593Smuzhiyun 	.init_vm = kvmppc_core_init_vm_pr,
2088*4882a593Smuzhiyun 	.destroy_vm = kvmppc_core_destroy_vm_pr,
2089*4882a593Smuzhiyun 	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
2090*4882a593Smuzhiyun 	.emulate_op = kvmppc_core_emulate_op_pr,
2091*4882a593Smuzhiyun 	.emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
2092*4882a593Smuzhiyun 	.emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
2093*4882a593Smuzhiyun 	.fast_vcpu_kick = kvm_vcpu_kick,
2094*4882a593Smuzhiyun 	.arch_vm_ioctl  = kvm_arch_vm_ioctl_pr,
2095*4882a593Smuzhiyun #ifdef CONFIG_PPC_BOOK3S_64
2096*4882a593Smuzhiyun 	.hcall_implemented = kvmppc_hcall_impl_pr,
2097*4882a593Smuzhiyun 	.configure_mmu = kvm_configure_mmu_pr,
2098*4882a593Smuzhiyun #endif
2099*4882a593Smuzhiyun 	.giveup_ext = kvmppc_giveup_ext,
2100*4882a593Smuzhiyun };
2101*4882a593Smuzhiyun 
2102*4882a593Smuzhiyun 
2103*4882a593Smuzhiyun int kvmppc_book3s_init_pr(void)
2104*4882a593Smuzhiyun {
2105*4882a593Smuzhiyun 	int r;
2106*4882a593Smuzhiyun 
2107*4882a593Smuzhiyun 	r = kvmppc_core_check_processor_compat_pr();
2108*4882a593Smuzhiyun 	if (r < 0)
2109*4882a593Smuzhiyun 		return r;
2110*4882a593Smuzhiyun 
2111*4882a593Smuzhiyun 	kvm_ops_pr.owner = THIS_MODULE;
2112*4882a593Smuzhiyun 	kvmppc_pr_ops = &kvm_ops_pr;
2113*4882a593Smuzhiyun 
2114*4882a593Smuzhiyun 	r = kvmppc_mmu_hpte_sysinit();
2115*4882a593Smuzhiyun 	return r;
2116*4882a593Smuzhiyun }
2117*4882a593Smuzhiyun 
2118*4882a593Smuzhiyun void kvmppc_book3s_exit_pr(void)
2119*4882a593Smuzhiyun {
2120*4882a593Smuzhiyun 	kvmppc_pr_ops = NULL;
2121*4882a593Smuzhiyun 	kvmppc_mmu_hpte_sysexit();
2122*4882a593Smuzhiyun }
2123*4882a593Smuzhiyun 
2124*4882a593Smuzhiyun /*
2125*4882a593Smuzhiyun  * We only support separate modules for book3s 64
2126*4882a593Smuzhiyun  */
2127*4882a593Smuzhiyun #ifdef CONFIG_PPC_BOOK3S_64
2128*4882a593Smuzhiyun 
2129*4882a593Smuzhiyun module_init(kvmppc_book3s_init_pr);
2130*4882a593Smuzhiyun module_exit(kvmppc_book3s_exit_pr);
2131*4882a593Smuzhiyun 
2132*4882a593Smuzhiyun MODULE_LICENSE("GPL");
2133*4882a593Smuzhiyun MODULE_ALIAS_MISCDEV(KVM_MINOR);
2134*4882a593Smuzhiyun MODULE_ALIAS("devname:kvm");
2135*4882a593Smuzhiyun #endif
2136