xref: /OK3568_Linux_fs/kernel/arch/powerpc/kvm/book3s_hv.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Paul Mackerras <paulus@au1.ibm.com>
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors in hypervisor mode (specifically POWER7 and later).
 *
 * This file is derived from arch/powerpc/kvm/book3s.c,
 * by Alexander Graf <agraf@suse.de>.
 */

#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched/signal.h>
#include <linux/sched/stat.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <linux/srcu.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/of.h>

#include <asm/ftrace.h>
#include <asm/reg.h>
#include <asm/ppc-opcode.h>
#include <asm/asm-prototypes.h>
#include <asm/archrandom.h>
#include <asm/debug.h>
#include <asm/disassemble.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/pmc.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/switch_to.h>
#include <asm/smp.h>
#include <asm/dbell.h>
#include <asm/hmi.h>
#include <asm/pnv-pci.h>
#include <asm/mmu.h>
#include <asm/opal.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/hw_breakpoint.h>
#include <asm/kvm_book3s_uvmem.h>
#include <asm/ultravisor.h>
#include <asm/dtl.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_hv.h"

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */

/* Used to indicate that a guest page fault needs to be handled */
#define RESUME_PAGE_FAULT	(RESUME_GUEST | RESUME_FLAG_ARCH1)
/* Used to indicate that a guest passthrough interrupt needs to be handled */
#define RESUME_PASSTHROUGH	(RESUME_GUEST | RESUME_FLAG_ARCH2)

/* Used as a "null" value for timebase values */
#define TB_NIL	(~(u64)0)

static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
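
/*
 * hcall numbers are multiples of 4 (H_CEDE is 0xE0, for instance), so the
 * bitmap above needs one bit per opcode/4.  An illustrative check, not
 * taken from this file, of whether an hcall is enabled:
 *
 *	if (req <= MAX_HCALL_OPCODE &&
 *	    test_bit(req / 4, default_enabled_hcalls))
 *		... hcall is enabled by default ...
 */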

static int dynamic_mt_modes = 6;
module_param(dynamic_mt_modes, int, 0644);
MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)");
static int target_smt_mode;
module_param(target_smt_mode, int, 0644);
MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)");

static bool indep_threads_mode = true;
module_param(indep_threads_mode, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(indep_threads_mode, "Independent-threads mode (only on POWER9)");

static bool one_vm_per_core;
module_param(one_vm_per_core, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(one_vm_per_core, "Only run vCPUs from the same VM on a core (requires indep_threads_mode=N)");

#ifdef CONFIG_KVM_XICS
static const struct kernel_param_ops module_param_ops = {
	.set = param_set_int,
	.get = param_get_int,
};

module_param_cb(kvm_irq_bypass, &module_param_ops, &kvm_irq_bypass, 0644);
MODULE_PARM_DESC(kvm_irq_bypass, "Bypass passthrough interrupt optimization");

module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect, 0644);
MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
#endif

/* If set, guests are allowed to create and control nested guests */
static bool nested = true;
module_param(nested, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nested, "Enable nested virtualization (only on POWER9)");

static inline bool nesting_enabled(struct kvm *kvm)
{
	return kvm->arch.nested_enable && kvm_is_radix(kvm);
}

/* If set, the threads on each CPU core have to be in the same MMU mode */
static bool no_mixing_hpt_and_radix;

static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);

/*
 * RWMR values for POWER8.  These control the rate at which PURR
 * and SPURR count and should be set according to the number of
 * online threads in the vcore being run.
 */
#define RWMR_RPA_P8_1THREAD	0x164520C62609AECAUL
#define RWMR_RPA_P8_2THREAD	0x7FFF2908450D8DA9UL
#define RWMR_RPA_P8_3THREAD	0x164520C62609AECAUL
#define RWMR_RPA_P8_4THREAD	0x199A421245058DA9UL
#define RWMR_RPA_P8_5THREAD	0x164520C62609AECAUL
#define RWMR_RPA_P8_6THREAD	0x164520C62609AECAUL
#define RWMR_RPA_P8_7THREAD	0x164520C62609AECAUL
#define RWMR_RPA_P8_8THREAD	0x164520C62609AECAUL

static unsigned long p8_rwmr_values[MAX_SMT_THREADS + 1] = {
	RWMR_RPA_P8_1THREAD,
	RWMR_RPA_P8_1THREAD,
	RWMR_RPA_P8_2THREAD,
	RWMR_RPA_P8_3THREAD,
	RWMR_RPA_P8_4THREAD,
	RWMR_RPA_P8_5THREAD,
	RWMR_RPA_P8_6THREAD,
	RWMR_RPA_P8_7THREAD,
	RWMR_RPA_P8_8THREAD,
};

static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
		int *ip)
{
	int i = *ip;
	struct kvm_vcpu *vcpu;

	while (++i < MAX_SMT_THREADS) {
		vcpu = READ_ONCE(vc->runnable_threads[i]);
		if (vcpu) {
			*ip = i;
			return vcpu;
		}
	}
	return NULL;
}

/* Used to traverse the list of runnable threads for a given vcore */
#define for_each_runnable_thread(i, vcpu, vc) \
	for (i = -1; (vcpu = next_runnable_thread(vc, &i)); )
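
/*
 * Typical use of the iterator above (a minimal sketch, not taken from
 * this excerpt):
 *
 *	int i;
 *	struct kvm_vcpu *vcpu;
 *
 *	for_each_runnable_thread(i, vcpu, vc)
 *		... act on each runnable vcpu of the vcore ...
 *
 * The loop starts at i = -1 so that next_runnable_thread() scans from
 * slot 0 and terminates when it returns NULL.
 */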

static bool kvmppc_ipi_thread(int cpu)
{
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* If we're a nested hypervisor, fall back to ordinary IPIs for now */
	if (kvmhv_on_pseries())
		return false;

	/* On POWER9 we can use msgsnd to IPI any cpu */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		smp_mb();
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return true;
	}

	/* On POWER8 for IPIs to threads in the same core, use msgsnd */
	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		preempt_disable();
		if (cpu_first_thread_sibling(cpu) ==
		    cpu_first_thread_sibling(smp_processor_id())) {
			msg |= cpu_thread_in_core(cpu);
			smp_mb();
			__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
			preempt_enable();
			return true;
		}
		preempt_enable();
	}

#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
	if (cpu >= 0 && cpu < nr_cpu_ids) {
		if (paca_ptrs[cpu]->kvm_hstate.xics_phys) {
			xics_wake_cpu(cpu);
			return true;
		}
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
		return true;
	}
#endif

	return false;
}
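
/*
 * In short, the fast-IPI methods tried above are: msgsnd to any cpu on
 * POWER9, msgsnd within the same core on POWER8, then a native XICS or
 * OPAL doorbell where available.  A false return tells the caller
 * (kvmppc_fast_vcpu_kick_hv() below) to fall back to an ordinary
 * smp_send_reschedule() IPI.
 */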

static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct rcuwait *waitp;

	waitp = kvm_arch_vcpu_get_wait(vcpu);
	if (rcuwait_wake_up(waitp))
		++vcpu->stat.halt_wakeup;

	cpu = READ_ONCE(vcpu->arch.thread_cpu);
	if (cpu >= 0 && kvmppc_ipi_thread(cpu))
		return;

	/* CPU points to the first thread of the core */
	cpu = vcpu->cpu;
	if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
		smp_send_reschedule(cpu);
}

/*
 * We use the vcpu_load/put functions to measure stolen time.
 * Stolen time is counted as time when either the vcpu is able to
 * run as part of a virtual core, but the task running the vcore
 * is preempted or sleeping, or when the vcpu needs something done
 * in the kernel by the task running the vcpu, but that task is
 * preempted or sleeping.  Those two things have to be counted
 * separately, since one of the vcpu tasks will take on the job
 * of running the core, and the other vcpu tasks in the vcore will
 * sleep waiting for it to do that, but that sleep shouldn't count
 * as stolen time.
 *
 * Hence we accumulate stolen time when the vcpu can run as part of
 * a vcore using vc->stolen_tb, and the stolen time when the vcpu
 * needs its task to do other things in the kernel (for example,
 * service a page fault) in busy_stolen.  We don't accumulate
 * stolen time for a vcore when it is inactive, or for a vcpu
 * when it is in state RUNNING or NOTREADY.  NOTREADY is a bit of
 * a misnomer; it means that the vcpu task is not executing in
 * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in
 * the kernel.  We don't have any way of dividing up that time
 * between time that the vcpu is genuinely stopped, time that
 * the task is actively working on behalf of the vcpu, and time
 * that the task is preempted, so we don't count any of it as
 * stolen.
 *
 * Updates to busy_stolen are protected by arch.tbacct_lock;
 * updates to vc->stolen_tb are protected by the vcore->stoltb_lock
 * lock.  The stolen times are measured in units of timebase ticks.
 * (Note that the != TB_NIL checks below are purely defensive;
 * they should never fail.)
 */

static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc)
{
	unsigned long flags;

	spin_lock_irqsave(&vc->stoltb_lock, flags);
	vc->preempt_tb = mftb();
	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
}

static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc)
{
	unsigned long flags;

	spin_lock_irqsave(&vc->stoltb_lock, flags);
	if (vc->preempt_tb != TB_NIL) {
		vc->stolen_tb += mftb() - vc->preempt_tb;
		vc->preempt_tb = TB_NIL;
	}
	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
}
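
/*
 * Worked example of the pairing above (illustrative numbers): if the
 * vcore is preempted at timebase tick 1000 (start_stolen records
 * preempt_tb = 1000) and resumes at tick 1600 (end_stolen runs), then
 * stolen_tb grows by 600 ticks and preempt_tb is reset to TB_NIL, so
 * a stray second end_stolen would be a no-op.
 */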

static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long flags;

	/*
	 * We can test vc->runner without taking the vcore lock,
	 * because only this task ever sets vc->runner to this
	 * vcpu, and once it is set to this vcpu, only this task
	 * ever sets it to NULL.
	 */
	if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
		kvmppc_core_end_stolen(vc);

	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
	    vcpu->arch.busy_preempt != TB_NIL) {
		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
		vcpu->arch.busy_preempt = TB_NIL;
	}
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}

static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long flags;

	if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
		kvmppc_core_start_stolen(vc);

	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
		vcpu->arch.busy_preempt = mftb();
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}

static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.pvr = pvr;
}

/* Dummy value used in computing PCR value below */
#define PCR_ARCH_31    (PCR_ARCH_300 << 1)

static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
{
	unsigned long host_pcr_bit = 0, guest_pcr_bit = 0;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	/* We can (emulate) our own architecture version and anything older */
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		host_pcr_bit = PCR_ARCH_31;
	else if (cpu_has_feature(CPU_FTR_ARCH_300))
		host_pcr_bit = PCR_ARCH_300;
	else if (cpu_has_feature(CPU_FTR_ARCH_207S))
		host_pcr_bit = PCR_ARCH_207;
	else if (cpu_has_feature(CPU_FTR_ARCH_206))
		host_pcr_bit = PCR_ARCH_206;
	else
		host_pcr_bit = PCR_ARCH_205;

	/* Determine lowest PCR bit needed to run guest in given PVR level */
	guest_pcr_bit = host_pcr_bit;
	if (arch_compat) {
		switch (arch_compat) {
		case PVR_ARCH_205:
			guest_pcr_bit = PCR_ARCH_205;
			break;
		case PVR_ARCH_206:
		case PVR_ARCH_206p:
			guest_pcr_bit = PCR_ARCH_206;
			break;
		case PVR_ARCH_207:
			guest_pcr_bit = PCR_ARCH_207;
			break;
		case PVR_ARCH_300:
			guest_pcr_bit = PCR_ARCH_300;
			break;
		case PVR_ARCH_31:
			guest_pcr_bit = PCR_ARCH_31;
			break;
		default:
			return -EINVAL;
		}
	}

	/* Check requested PCR bits don't exceed our capabilities */
	if (guest_pcr_bit > host_pcr_bit)
		return -EINVAL;

	spin_lock(&vc->lock);
	vc->arch_compat = arch_compat;
	/*
	 * Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit
	 * Also set all reserved PCR bits
	 */
	vc->pcr = (host_pcr_bit - guest_pcr_bit) | PCR_MASK;
	spin_unlock(&vc->lock);

	return 0;
}
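
/*
 * Example of the PCR arithmetic above, assuming the single-bit
 * PCR_ARCH_* values from asm/reg.h (PCR_ARCH_300 = 0x10,
 * PCR_ARCH_207 = 0x8): on a POWER9 host running a guest in ISA v2.07
 * compat mode,
 *
 *	vc->pcr = (0x10 - 0x8) | PCR_MASK = 0x8 | PCR_MASK
 *
 * i.e. the subtraction sets every architected PCR bit from the guest
 * level up to, but excluding, the host level, exactly as the comment
 * in the function states.
 */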

static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
	int r;

	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
	pr_err("pc  = %.16lx  msr = %.16llx  trap = %x\n",
	       vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap);
	for (r = 0; r < 16; ++r)
		pr_err("r%2d = %.16lx  r%d = %.16lx\n",
		       r, kvmppc_get_gpr(vcpu, r),
		       r+16, kvmppc_get_gpr(vcpu, r+16));
	pr_err("ctr = %.16lx  lr  = %.16lx\n",
	       vcpu->arch.regs.ctr, vcpu->arch.regs.link);
	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
	pr_err("cr = %.8lx  xer = %.16lx  dsisr = %.8x\n",
	       vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
	pr_err("fault dar = %.16lx dsisr = %.8x\n",
	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
	for (r = 0; r < vcpu->arch.slb_max; ++r)
		pr_err("  ESID = %.16llx VSID = %.16llx\n",
		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
	       vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
	       vcpu->arch.last_inst);
}

static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
	return kvm_get_vcpu_by_id(kvm, id);
}

static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
	vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
	vpa->yield_count = cpu_to_be32(1);
}

static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
		   unsigned long addr, unsigned long len)
{
	/* check address is cacheline aligned */
	if (addr & (L1_CACHE_BYTES - 1))
		return -EINVAL;
	spin_lock(&vcpu->arch.vpa_update_lock);
	if (v->next_gpa != addr || v->len != len) {
		v->next_gpa = addr;
		v->len = addr ? len : 0;
		v->update_pending = 1;
	}
	spin_unlock(&vcpu->arch.vpa_update_lock);
	return 0;
}

/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
struct reg_vpa {
	u32 dummy;
	union {
		__be16 hword;
		__be32 word;
	} length;
};

static int vpa_is_registered(struct kvmppc_vpa *vpap)
{
	if (vpap->update_pending)
		return vpap->next_gpa != 0;
	return vpap->pinned_addr != NULL;
}

static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
				       unsigned long flags,
				       unsigned long vcpuid, unsigned long vpa)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long len, nb;
	void *va;
	struct kvm_vcpu *tvcpu;
	int err;
	int subfunc;
	struct kvmppc_vpa *vpap;

	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
	if (!tvcpu)
		return H_PARAMETER;

	subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
	if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
	    subfunc == H_VPA_REG_SLB) {
		/* Registering new area - address must be cache-line aligned */
		if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
			return H_PARAMETER;

		/* convert logical addr to kernel addr and read length */
		va = kvmppc_pin_guest_page(kvm, vpa, &nb);
		if (va == NULL)
			return H_PARAMETER;
		if (subfunc == H_VPA_REG_VPA)
			len = be16_to_cpu(((struct reg_vpa *)va)->length.hword);
		else
			len = be32_to_cpu(((struct reg_vpa *)va)->length.word);
		kvmppc_unpin_guest_page(kvm, va, vpa, false);

		/* Check length */
		if (len > nb || len < sizeof(struct reg_vpa))
			return H_PARAMETER;
	} else {
		vpa = 0;
		len = 0;
	}

	err = H_PARAMETER;
	vpap = NULL;
	spin_lock(&tvcpu->arch.vpa_update_lock);

	switch (subfunc) {
	case H_VPA_REG_VPA:		/* register VPA */
		/*
		 * The size of our lppaca is 1kB because of the way we align
		 * it for the guest to avoid crossing a 4kB boundary. We only
		 * use 640 bytes of the structure though, so we should accept
		 * clients that set a size of 640.
		 */
		BUILD_BUG_ON(sizeof(struct lppaca) != 640);
		if (len < sizeof(struct lppaca))
			break;
		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_REG_DTL:		/* register DTL */
		if (len < sizeof(struct dtl_entry))
			break;
		len -= len % sizeof(struct dtl_entry);

		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_REG_SLB:		/* register SLB shadow buffer */
		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;

	case H_VPA_DEREG_VPA:		/* deregister VPA */
		/* Check they don't still have a DTL or SLB buf registered */
		err = H_RESOURCE;
		if (vpa_is_registered(&tvcpu->arch.dtl) ||
		    vpa_is_registered(&tvcpu->arch.slb_shadow))
			break;

		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_DEREG_DTL:		/* deregister DTL */
		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_DEREG_SLB:		/* deregister SLB shadow buffer */
		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;
	}

	if (vpap) {
		vpap->next_gpa = vpa;
		vpap->len = len;
		vpap->update_pending = 1;
	}

	spin_unlock(&tvcpu->arch.vpa_update_lock);

	return err;
}
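
/*
 * Sketch of the call path into the handler above: the guest issues
 * H_REGISTER_VPA, and kvmppc_pseries_do_hcall() (below) forwards
 * r4/r5/r6 as the flags, target vcpu id and cache-line-aligned guest
 * logical address:
 *
 *	ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
 *				kvmppc_get_gpr(vcpu, 5),
 *				kvmppc_get_gpr(vcpu, 6));
 *
 * Registration only queues next_gpa/len with update_pending set; the
 * actual pinning happens later in kvmppc_update_vpa().
 */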

static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
{
	struct kvm *kvm = vcpu->kvm;
	void *va;
	unsigned long nb;
	unsigned long gpa;

	/*
	 * We need to pin the page pointed to by vpap->next_gpa,
	 * but we can't call kvmppc_pin_guest_page under the lock
	 * as it does get_user_pages() and down_read().  So we
	 * have to drop the lock, pin the page, then get the lock
	 * again and check that a new area didn't get registered
	 * in the meantime.
	 */
	for (;;) {
		gpa = vpap->next_gpa;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		va = NULL;
		nb = 0;
		if (gpa)
			va = kvmppc_pin_guest_page(kvm, gpa, &nb);
		spin_lock(&vcpu->arch.vpa_update_lock);
		if (gpa == vpap->next_gpa)
			break;
		/* sigh... unpin that one and try again */
		if (va)
			kvmppc_unpin_guest_page(kvm, va, gpa, false);
	}

	vpap->update_pending = 0;
	if (va && nb < vpap->len) {
		/*
		 * If it's now too short, it must be that userspace
		 * has changed the mappings underlying guest memory,
		 * so unregister the region.
		 */
		kvmppc_unpin_guest_page(kvm, va, gpa, false);
		va = NULL;
	}
	if (vpap->pinned_addr)
		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
					vpap->dirty);
	vpap->gpa = gpa;
	vpap->pinned_addr = va;
	vpap->dirty = false;
	if (va)
		vpap->pinned_end = va + vpap->len;
}

static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vpa.update_pending ||
	      vcpu->arch.slb_shadow.update_pending ||
	      vcpu->arch.dtl.update_pending))
		return;

	spin_lock(&vcpu->arch.vpa_update_lock);
	if (vcpu->arch.vpa.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
		if (vcpu->arch.vpa.pinned_addr)
			init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
	}
	if (vcpu->arch.dtl.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
		vcpu->arch.dtl_index = 0;
	}
	if (vcpu->arch.slb_shadow.update_pending)
		kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
	spin_unlock(&vcpu->arch.vpa_update_lock);
}

/*
 * Return the accumulated stolen time for the vcore up until `now'.
 * The caller should hold the vcore lock.
 */
static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
{
	u64 p;
	unsigned long flags;

	spin_lock_irqsave(&vc->stoltb_lock, flags);
	p = vc->stolen_tb;
	if (vc->vcore_state != VCORE_INACTIVE &&
	    vc->preempt_tb != TB_NIL)
		p += now - vc->preempt_tb;
	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
	return p;
}

static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
				    struct kvmppc_vcore *vc)
{
	struct dtl_entry *dt;
	struct lppaca *vpa;
	unsigned long stolen;
	unsigned long core_stolen;
	u64 now;
	unsigned long flags;

	dt = vcpu->arch.dtl_ptr;
	vpa = vcpu->arch.vpa.pinned_addr;
	now = mftb();
	core_stolen = vcore_stolen_time(vc, now);
	stolen = core_stolen - vcpu->arch.stolen_logged;
	vcpu->arch.stolen_logged = core_stolen;
	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	stolen += vcpu->arch.busy_stolen;
	vcpu->arch.busy_stolen = 0;
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
	if (!dt || !vpa)
		return;
	memset(dt, 0, sizeof(struct dtl_entry));
	dt->dispatch_reason = 7;
	dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
	dt->timebase = cpu_to_be64(now + vc->tb_offset);
	dt->enqueue_to_dispatch_time = cpu_to_be32(stolen);
	dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
	dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
	++dt;
	if (dt == vcpu->arch.dtl.pinned_end)
		dt = vcpu->arch.dtl.pinned_addr;
	vcpu->arch.dtl_ptr = dt;
	/* order writing *dt vs. writing vpa->dtl_idx */
	smp_wmb();
	vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
	vcpu->arch.dtl.dirty = true;
}
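
/*
 * The dispatch trace log is a simple ring: dtl_ptr advances one
 * dtl_entry per dispatch and wraps from pinned_end back to pinned_addr,
 * while the ever-increasing dtl_index published in the VPA tells the
 * guest how many entries have been written in total.  As an
 * illustration (assuming a 48-byte struct dtl_entry), a 4kB DTL holds
 * 85 entries, so the 86th entry written overwrites slot 0 while the
 * published dtl_idx reads 86.
 */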

/* See if there is a doorbell interrupt pending for a vcpu */
static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu)
{
	int thr;
	struct kvmppc_vcore *vc;

	if (vcpu->arch.doorbell_request)
		return true;
	/*
	 * Ensure that the read of vcore->dpdes comes after the read
	 * of vcpu->doorbell_request.  This barrier matches the
	 * smp_wmb() in kvmppc_guest_entry_inject().
	 */
	smp_rmb();
	vc = vcpu->arch.vcore;
	thr = vcpu->vcpu_id - vc->first_vcpuid;
	return !!(vc->dpdes & (1 << thr));
}
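
/*
 * dpdes is a bitmap of pending doorbells, one bit per thread of the
 * vcore.  For instance, the vcpu whose id is first_vcpuid + 2 tests
 * bit 2 (mask 1 << 2 == 0x4) of vc->dpdes.
 */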

static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
		return true;
	if ((!vcpu->arch.vcore->arch_compat) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		return true;
	return false;
}

static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
			     unsigned long resource, unsigned long value1,
			     unsigned long value2)
{
	switch (resource) {
	case H_SET_MODE_RESOURCE_SET_CIABR:
		if (!kvmppc_power8_compatible(vcpu))
			return H_P2;
		if (value2)
			return H_P4;
		if (mflags)
			return H_UNSUPPORTED_FLAG_START;
		/* Guests can't breakpoint the hypervisor */
		if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER)
			return H_P3;
		vcpu->arch.ciabr  = value1;
		return H_SUCCESS;
	case H_SET_MODE_RESOURCE_SET_DAWR0:
		if (!kvmppc_power8_compatible(vcpu))
			return H_P2;
		if (!ppc_breakpoint_available())
			return H_P2;
		if (mflags)
			return H_UNSUPPORTED_FLAG_START;
		if (value2 & DABRX_HYP)
			return H_P4;
		vcpu->arch.dawr  = value1;
		vcpu->arch.dawrx = value2;
		return H_SUCCESS;
	case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
		/* KVM does not support mflags=2 (AIL=2) */
		if (mflags != 0 && mflags != 3)
			return H_UNSUPPORTED_FLAG_START;
		return H_TOO_HARD;
	default:
		return H_TOO_HARD;
	}
}
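
/*
 * As an example (hypothetical argument values), a guest arming a CIABR
 * instruction breakpoint would issue:
 *
 *	H_SET_MODE(mflags = 0, resource = H_SET_MODE_RESOURCE_SET_CIABR,
 *		   value1 = <ciabr value>, value2 = 0)
 *
 * which succeeds above only if the vcore is at least POWER8-compatible
 * and the CIABR privilege field does not request a hypervisor-level
 * breakpoint.
 */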

/* Copy guest memory in place - must reside within a single memslot */
static int kvmppc_copy_guest(struct kvm *kvm, gpa_t to, gpa_t from,
				  unsigned long len)
{
	struct kvm_memory_slot *to_memslot = NULL;
	struct kvm_memory_slot *from_memslot = NULL;
	unsigned long to_addr, from_addr;
	int r;

	/* Get HPA for from address */
	from_memslot = gfn_to_memslot(kvm, from >> PAGE_SHIFT);
	if (!from_memslot)
		return -EFAULT;
	if ((from + len) >= ((from_memslot->base_gfn + from_memslot->npages)
			     << PAGE_SHIFT))
		return -EINVAL;
	from_addr = gfn_to_hva_memslot(from_memslot, from >> PAGE_SHIFT);
	if (kvm_is_error_hva(from_addr))
		return -EFAULT;
	from_addr |= (from & (PAGE_SIZE - 1));

	/* Get HPA for to address */
	to_memslot = gfn_to_memslot(kvm, to >> PAGE_SHIFT);
	if (!to_memslot)
		return -EFAULT;
	if ((to + len) >= ((to_memslot->base_gfn + to_memslot->npages)
			   << PAGE_SHIFT))
		return -EINVAL;
	to_addr = gfn_to_hva_memslot(to_memslot, to >> PAGE_SHIFT);
	if (kvm_is_error_hva(to_addr))
		return -EFAULT;
	to_addr |= (to & (PAGE_SIZE - 1));

	/* Perform copy */
	r = raw_copy_in_user((void __user *)to_addr, (void __user *)from_addr,
			     len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, to >> PAGE_SHIFT);
	return 0;
}

static long kvmppc_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
			       unsigned long dest, unsigned long src)
{
	u64 pg_sz = SZ_4K;		/* 4K page size */
	u64 pg_mask = SZ_4K - 1;
	int ret;

	/* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */
	if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE |
		      H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED))
		return H_PARAMETER;

	/* dest (and src if copy_page flag set) must be page aligned */
	if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask)))
		return H_PARAMETER;

	/* zero and/or copy the page as determined by the flags */
	if (flags & H_COPY_PAGE) {
		ret = kvmppc_copy_guest(vcpu->kvm, dest, src, pg_sz);
		if (ret < 0)
			return H_PARAMETER;
	} else if (flags & H_ZERO_PAGE) {
		ret = kvm_clear_guest(vcpu->kvm, dest, pg_sz);
		if (ret < 0)
			return H_PARAMETER;
	}

	/* We can ignore the remaining flags */

	return H_SUCCESS;
}
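
/*
 * Example guest usage (hypothetical addresses): zeroing the page at
 * guest physical 0x4000 is H_PAGE_INIT(H_ZERO_PAGE, 0x4000, 0), and
 * copying that page to 0x5000 is H_PAGE_INIT(H_COPY_PAGE, 0x5000,
 * 0x4000).  dest (and src for a copy) must be 4kB-aligned, and each
 * page must lie within a single memslot, as enforced by
 * kvmppc_copy_guest() above.
 */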

static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
{
	struct kvmppc_vcore *vcore = target->arch.vcore;

	/*
	 * We expect to have been called by the real mode handler
	 * (kvmppc_rm_h_confer()) which would have directly returned
	 * H_SUCCESS if the source vcore wasn't idle (e.g. if it may
	 * have useful work to do and should not confer) so we don't
	 * recheck that here.
	 */

	spin_lock(&vcore->lock);
	if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
	    vcore->vcore_state != VCORE_INACTIVE &&
	    vcore->runner)
		target = vcore->runner;
	spin_unlock(&vcore->lock);

	return kvm_vcpu_yield_to(target);
}

static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
{
	int yield_count = 0;
	struct lppaca *lppaca;

	spin_lock(&vcpu->arch.vpa_update_lock);
	lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
	if (lppaca)
		yield_count = be32_to_cpu(lppaca->yield_count);
	spin_unlock(&vcpu->arch.vpa_update_lock);
	return yield_count;
}

int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
	unsigned long req = kvmppc_get_gpr(vcpu, 3);
	unsigned long target, ret = H_SUCCESS;
	int yield_count;
	struct kvm_vcpu *tvcpu;
	int idx, rc;

	if (req <= MAX_HCALL_OPCODE &&
	    !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
		return RESUME_HOST;

	switch (req) {
	case H_CEDE:
		break;
	case H_PROD:
		target = kvmppc_get_gpr(vcpu, 4);
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		tvcpu->arch.prodded = 1;
		smp_mb();
		if (tvcpu->arch.ceded)
			kvmppc_fast_vcpu_kick_hv(tvcpu);
		break;
	case H_CONFER:
		target = kvmppc_get_gpr(vcpu, 4);
		if (target == -1)
			break;
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		yield_count = kvmppc_get_gpr(vcpu, 5);
		if (kvmppc_get_yield_count(tvcpu) != yield_count)
			break;
		kvm_arch_vcpu_yield_to(tvcpu);
		break;
	case H_REGISTER_VPA:
		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		break;
	case H_RTAS:
		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
			return RESUME_HOST;

		idx = srcu_read_lock(&vcpu->kvm->srcu);
		rc = kvmppc_rtas_hcall(vcpu);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);

		if (rc == -ENOENT)
			return RESUME_HOST;
		else if (rc == 0)
			break;

		/* Send the error out to userspace via KVM_RUN */
		return rc;
	case H_LOGICAL_CI_LOAD:
		ret = kvmppc_h_logical_ci_load(vcpu);
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_LOGICAL_CI_STORE:
		ret = kvmppc_h_logical_ci_store(vcpu);
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_SET_MODE:
		ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6),
					kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
		if (kvmppc_xics_enabled(vcpu)) {
			if (xics_on_xive()) {
				ret = H_NOT_AVAILABLE;
				return RESUME_GUEST;
			}
			ret = kvmppc_xics_hcall(vcpu, req);
			break;
		}
		return RESUME_HOST;
	case H_SET_DABR:
		ret = kvmppc_h_set_dabr(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_SET_XDABR:
		ret = kvmppc_h_set_xdabr(vcpu, kvmppc_get_gpr(vcpu, 4),
						kvmppc_get_gpr(vcpu, 5));
		break;
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case H_GET_TCE:
		ret = kvmppc_h_get_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
						kvmppc_get_gpr(vcpu, 5));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_PUT_TCE:
		ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
						kvmppc_get_gpr(vcpu, 5),
						kvmppc_get_gpr(vcpu, 6));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_PUT_TCE_INDIRECT:
		ret = kvmppc_h_put_tce_indirect(vcpu, kvmppc_get_gpr(vcpu, 4),
						kvmppc_get_gpr(vcpu, 5),
						kvmppc_get_gpr(vcpu, 6),
						kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_STUFF_TCE:
		ret = kvmppc_h_stuff_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
						kvmppc_get_gpr(vcpu, 5),
						kvmppc_get_gpr(vcpu, 6),
						kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
#endif
	case H_RANDOM:
		if (!powernv_get_random_long(&vcpu->arch.regs.gpr[4]))
			ret = H_HARDWARE;
		break;

	case H_SET_PARTITION_TABLE:
		ret = H_FUNCTION;
		if (nesting_enabled(vcpu->kvm))
			ret = kvmhv_set_partition_table(vcpu);
		break;
	case H_ENTER_NESTED:
		ret = H_FUNCTION;
		if (!nesting_enabled(vcpu->kvm))
			break;
		ret = kvmhv_enter_nested_guest(vcpu);
		if (ret == H_INTERRUPT) {
			kvmppc_set_gpr(vcpu, 3, 0);
			vcpu->arch.hcall_needed = 0;
			return -EINTR;
		} else if (ret == H_TOO_HARD) {
			kvmppc_set_gpr(vcpu, 3, 0);
			vcpu->arch.hcall_needed = 0;
			return RESUME_HOST;
		}
		break;
	case H_TLB_INVALIDATE:
		ret = H_FUNCTION;
		if (nesting_enabled(vcpu->kvm))
			ret = kvmhv_do_nested_tlbie(vcpu);
		break;
	case H_COPY_TOFROM_GUEST:
		ret = H_FUNCTION;
		if (nesting_enabled(vcpu->kvm))
			ret = kvmhv_copy_tofrom_guest_nested(vcpu);
		break;
	case H_PAGE_INIT:
		ret = kvmppc_h_page_init(vcpu, kvmppc_get_gpr(vcpu, 4),
					 kvmppc_get_gpr(vcpu, 5),
					 kvmppc_get_gpr(vcpu, 6));
		break;
	case H_SVM_PAGE_IN:
		ret = H_UNSUPPORTED;
		if (kvmppc_get_srr1(vcpu) & MSR_S)
			ret = kvmppc_h_svm_page_in(vcpu->kvm,
						   kvmppc_get_gpr(vcpu, 4),
						   kvmppc_get_gpr(vcpu, 5),
						   kvmppc_get_gpr(vcpu, 6));
		break;
	case H_SVM_PAGE_OUT:
		ret = H_UNSUPPORTED;
		if (kvmppc_get_srr1(vcpu) & MSR_S)
			ret = kvmppc_h_svm_page_out(vcpu->kvm,
						    kvmppc_get_gpr(vcpu, 4),
						    kvmppc_get_gpr(vcpu, 5),
						    kvmppc_get_gpr(vcpu, 6));
		break;
	case H_SVM_INIT_START:
		ret = H_UNSUPPORTED;
		if (kvmppc_get_srr1(vcpu) & MSR_S)
			ret = kvmppc_h_svm_init_start(vcpu->kvm);
		break;
	case H_SVM_INIT_DONE:
		ret = H_UNSUPPORTED;
		if (kvmppc_get_srr1(vcpu) & MSR_S)
			ret = kvmppc_h_svm_init_done(vcpu->kvm);
		break;
1106*4882a593Smuzhiyun 	case H_SVM_INIT_ABORT:
1107*4882a593Smuzhiyun 		/*
1108*4882a593Smuzhiyun 		 * Even if that call is made by the Ultravisor, the SRR1 value
1109*4882a593Smuzhiyun 		 * is the guest context one, with the secure bit clear as it has
1110*4882a593Smuzhiyun 		 * not yet been secured. So we can't check it here.
1111*4882a593Smuzhiyun 		 * Instead the kvm->arch.secure_guest flag is checked inside
1112*4882a593Smuzhiyun 		 * kvmppc_h_svm_init_abort().
1113*4882a593Smuzhiyun 		 */
1114*4882a593Smuzhiyun 		ret = kvmppc_h_svm_init_abort(vcpu->kvm);
1115*4882a593Smuzhiyun 		break;
1116*4882a593Smuzhiyun 
1117*4882a593Smuzhiyun 	default:
1118*4882a593Smuzhiyun 		return RESUME_HOST;
1119*4882a593Smuzhiyun 	}
1120*4882a593Smuzhiyun 	kvmppc_set_gpr(vcpu, 3, ret);
1121*4882a593Smuzhiyun 	vcpu->arch.hcall_needed = 0;
1122*4882a593Smuzhiyun 	return RESUME_GUEST;
1123*4882a593Smuzhiyun }
1124*4882a593Smuzhiyun 
1125*4882a593Smuzhiyun /*
1126*4882a593Smuzhiyun  * Handle H_CEDE in the nested virtualization case where we haven't
1127*4882a593Smuzhiyun  * called the real-mode hcall handlers in book3s_hv_rmhandlers.S.
1128*4882a593Smuzhiyun  * This has to be done early, not in kvmppc_pseries_do_hcall(), so
1129*4882a593Smuzhiyun  * that the cede logic in kvmppc_run_single_vcpu() works properly.
1130*4882a593Smuzhiyun  */
1131*4882a593Smuzhiyun static void kvmppc_nested_cede(struct kvm_vcpu *vcpu)
1132*4882a593Smuzhiyun {
1133*4882a593Smuzhiyun 	vcpu->arch.shregs.msr |= MSR_EE;
1134*4882a593Smuzhiyun 	vcpu->arch.ceded = 1;
1135*4882a593Smuzhiyun 	smp_mb();
1136*4882a593Smuzhiyun 	if (vcpu->arch.prodded) {
1137*4882a593Smuzhiyun 		vcpu->arch.prodded = 0;
1138*4882a593Smuzhiyun 		smp_mb();
1139*4882a593Smuzhiyun 		vcpu->arch.ceded = 0;
1140*4882a593Smuzhiyun 	}
1141*4882a593Smuzhiyun }
1142*4882a593Smuzhiyun 
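/*
 * Report whether hcall @cmd is handled in the kernel for an HV guest.
 * The hcalls listed below are always implemented here; anything else
 * is looked up in the real-mode hcall table.
 */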
1143*4882a593Smuzhiyun static int kvmppc_hcall_impl_hv(unsigned long cmd)
1144*4882a593Smuzhiyun {
1145*4882a593Smuzhiyun 	switch (cmd) {
1146*4882a593Smuzhiyun 	case H_CEDE:
1147*4882a593Smuzhiyun 	case H_PROD:
1148*4882a593Smuzhiyun 	case H_CONFER:
1149*4882a593Smuzhiyun 	case H_REGISTER_VPA:
1150*4882a593Smuzhiyun 	case H_SET_MODE:
1151*4882a593Smuzhiyun 	case H_LOGICAL_CI_LOAD:
1152*4882a593Smuzhiyun 	case H_LOGICAL_CI_STORE:
1153*4882a593Smuzhiyun #ifdef CONFIG_KVM_XICS
1154*4882a593Smuzhiyun 	case H_XIRR:
1155*4882a593Smuzhiyun 	case H_CPPR:
1156*4882a593Smuzhiyun 	case H_EOI:
1157*4882a593Smuzhiyun 	case H_IPI:
1158*4882a593Smuzhiyun 	case H_IPOLL:
1159*4882a593Smuzhiyun 	case H_XIRR_X:
1160*4882a593Smuzhiyun #endif
1161*4882a593Smuzhiyun 	case H_PAGE_INIT:
1162*4882a593Smuzhiyun 		return 1;
1163*4882a593Smuzhiyun 	}
1164*4882a593Smuzhiyun 
1165*4882a593Smuzhiyun 	/* See if it's in the real-mode table */
1166*4882a593Smuzhiyun 	return kvmppc_hcall_impl_hv_realmode(cmd);
1167*4882a593Smuzhiyun }
1168*4882a593Smuzhiyun 
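/*
 * Called when guest debug is enabled and the guest hit an illegal
 * instruction: if it is the software breakpoint instruction, exit to
 * userspace with KVM_EXIT_DEBUG; otherwise queue an illegal
 * instruction program interrupt for the guest.
 */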
1169*4882a593Smuzhiyun static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu)
1170*4882a593Smuzhiyun {
1171*4882a593Smuzhiyun 	u32 last_inst;
1172*4882a593Smuzhiyun 
1173*4882a593Smuzhiyun 	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
1174*4882a593Smuzhiyun 					EMULATE_DONE) {
1175*4882a593Smuzhiyun 		/*
1176*4882a593Smuzhiyun 		 * Fetch failed, so return to guest and
1177*4882a593Smuzhiyun 		 * try executing it again.
1178*4882a593Smuzhiyun 		 */
1179*4882a593Smuzhiyun 		return RESUME_GUEST;
1180*4882a593Smuzhiyun 	}
1181*4882a593Smuzhiyun 
1182*4882a593Smuzhiyun 	if (last_inst == KVMPPC_INST_SW_BREAKPOINT) {
1183*4882a593Smuzhiyun 		vcpu->run->exit_reason = KVM_EXIT_DEBUG;
1184*4882a593Smuzhiyun 		vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
1185*4882a593Smuzhiyun 		return RESUME_HOST;
1186*4882a593Smuzhiyun 	} else {
1187*4882a593Smuzhiyun 		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
1188*4882a593Smuzhiyun 		return RESUME_GUEST;
1189*4882a593Smuzhiyun 	}
1190*4882a593Smuzhiyun }
1191*4882a593Smuzhiyun 
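/* Empty IPI callback, used only to kick a physical CPU out of the guest. */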
1192*4882a593Smuzhiyun static void do_nothing(void *x)
1193*4882a593Smuzhiyun {
1194*4882a593Smuzhiyun }
1195*4882a593Smuzhiyun 
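/*
 * Assemble the DPDES image for the emulated SMT core containing this
 * vcpu: one bit per emulated thread, set if that thread's vcpu has a
 * doorbell pending.
 */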
1196*4882a593Smuzhiyun static unsigned long kvmppc_read_dpdes(struct kvm_vcpu *vcpu)
1197*4882a593Smuzhiyun {
1198*4882a593Smuzhiyun 	int thr, cpu, pcpu, nthreads;
1199*4882a593Smuzhiyun 	struct kvm_vcpu *v;
1200*4882a593Smuzhiyun 	unsigned long dpdes;
1201*4882a593Smuzhiyun 
1202*4882a593Smuzhiyun 	nthreads = vcpu->kvm->arch.emul_smt_mode;
1203*4882a593Smuzhiyun 	dpdes = 0;
1204*4882a593Smuzhiyun 	cpu = vcpu->vcpu_id & ~(nthreads - 1);
1205*4882a593Smuzhiyun 	for (thr = 0; thr < nthreads; ++thr, ++cpu) {
1206*4882a593Smuzhiyun 		v = kvmppc_find_vcpu(vcpu->kvm, cpu);
1207*4882a593Smuzhiyun 		if (!v)
1208*4882a593Smuzhiyun 			continue;
1209*4882a593Smuzhiyun 		/*
1210*4882a593Smuzhiyun 		 * If the vcpu is currently running on a physical cpu thread,
1211*4882a593Smuzhiyun 		 * interrupt it in order to pull it out of the guest briefly,
1212*4882a593Smuzhiyun 		 * which will update its vcore->dpdes value.
1213*4882a593Smuzhiyun 		 */
1214*4882a593Smuzhiyun 		pcpu = READ_ONCE(v->cpu);
1215*4882a593Smuzhiyun 		if (pcpu >= 0)
1216*4882a593Smuzhiyun 			smp_call_function_single(pcpu, do_nothing, NULL, 1);
1217*4882a593Smuzhiyun 		if (kvmppc_doorbell_pending(v))
1218*4882a593Smuzhiyun 			dpdes |= 1 << thr;
1219*4882a593Smuzhiyun 	}
1220*4882a593Smuzhiyun 	return dpdes;
1221*4882a593Smuzhiyun }
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun /*
1224*4882a593Smuzhiyun  * On POWER9, emulate doorbell-related instructions in order to
1225*4882a593Smuzhiyun  * give the guest the illusion of running on a multi-threaded core.
1226*4882a593Smuzhiyun  * The instructions emulated are msgsndp, msgclrp, mfspr TIR,
1227*4882a593Smuzhiyun  * and mfspr DPDES.
1228*4882a593Smuzhiyun  */
1229*4882a593Smuzhiyun static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
1230*4882a593Smuzhiyun {
1231*4882a593Smuzhiyun 	u32 inst, rb, thr;
1232*4882a593Smuzhiyun 	unsigned long arg;
1233*4882a593Smuzhiyun 	struct kvm *kvm = vcpu->kvm;
1234*4882a593Smuzhiyun 	struct kvm_vcpu *tvcpu;
1235*4882a593Smuzhiyun 
1236*4882a593Smuzhiyun 	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
1237*4882a593Smuzhiyun 		return RESUME_GUEST;
1238*4882a593Smuzhiyun 	if (get_op(inst) != 31)
1239*4882a593Smuzhiyun 		return EMULATE_FAIL;
1240*4882a593Smuzhiyun 	rb = get_rb(inst);
1241*4882a593Smuzhiyun 	thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1);
1242*4882a593Smuzhiyun 	switch (get_xop(inst)) {
1243*4882a593Smuzhiyun 	case OP_31_XOP_MSGSNDP:
1244*4882a593Smuzhiyun 		arg = kvmppc_get_gpr(vcpu, rb);
1245*4882a593Smuzhiyun 		if (((arg >> 27) & 0xf) != PPC_DBELL_SERVER)
1246*4882a593Smuzhiyun 			break;
1247*4882a593Smuzhiyun 		arg &= 0x3f;
1248*4882a593Smuzhiyun 		if (arg >= kvm->arch.emul_smt_mode)
1249*4882a593Smuzhiyun 			break;
1250*4882a593Smuzhiyun 		tvcpu = kvmppc_find_vcpu(kvm, vcpu->vcpu_id - thr + arg);
1251*4882a593Smuzhiyun 		if (!tvcpu)
1252*4882a593Smuzhiyun 			break;
1253*4882a593Smuzhiyun 		if (!tvcpu->arch.doorbell_request) {
1254*4882a593Smuzhiyun 			tvcpu->arch.doorbell_request = 1;
1255*4882a593Smuzhiyun 			kvmppc_fast_vcpu_kick_hv(tvcpu);
1256*4882a593Smuzhiyun 		}
1257*4882a593Smuzhiyun 		break;
1258*4882a593Smuzhiyun 	case OP_31_XOP_MSGCLRP:
1259*4882a593Smuzhiyun 		arg = kvmppc_get_gpr(vcpu, rb);
1260*4882a593Smuzhiyun 		if (((arg >> 27) & 0xf) != PPC_DBELL_SERVER)
1261*4882a593Smuzhiyun 			break;
1262*4882a593Smuzhiyun 		vcpu->arch.vcore->dpdes = 0;
1263*4882a593Smuzhiyun 		vcpu->arch.doorbell_request = 0;
1264*4882a593Smuzhiyun 		break;
1265*4882a593Smuzhiyun 	case OP_31_XOP_MFSPR:
1266*4882a593Smuzhiyun 		switch (get_sprn(inst)) {
1267*4882a593Smuzhiyun 		case SPRN_TIR:
1268*4882a593Smuzhiyun 			arg = thr;
1269*4882a593Smuzhiyun 			break;
1270*4882a593Smuzhiyun 		case SPRN_DPDES:
1271*4882a593Smuzhiyun 			arg = kvmppc_read_dpdes(vcpu);
1272*4882a593Smuzhiyun 			break;
1273*4882a593Smuzhiyun 		default:
1274*4882a593Smuzhiyun 			return EMULATE_FAIL;
1275*4882a593Smuzhiyun 		}
1276*4882a593Smuzhiyun 		kvmppc_set_gpr(vcpu, get_rt(inst), arg);
1277*4882a593Smuzhiyun 		break;
1278*4882a593Smuzhiyun 	default:
1279*4882a593Smuzhiyun 		return EMULATE_FAIL;
1280*4882a593Smuzhiyun 	}
1281*4882a593Smuzhiyun 	kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
1282*4882a593Smuzhiyun 	return RESUME_GUEST;
1283*4882a593Smuzhiyun }
1284*4882a593Smuzhiyun 
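/*
 * Handle an exit from the guest, dispatching on the interrupt vector
 * saved in vcpu->arch.trap.  Returns RESUME_GUEST to re-enter the
 * guest, RESUME_HOST to exit to userspace, or one of the intermediate
 * RESUME_PAGE_FAULT / RESUME_PASSTHROUGH codes for further handling.
 */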
1285*4882a593Smuzhiyun static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
1286*4882a593Smuzhiyun 				 struct task_struct *tsk)
1287*4882a593Smuzhiyun {
1288*4882a593Smuzhiyun 	struct kvm_run *run = vcpu->run;
1289*4882a593Smuzhiyun 	int r = RESUME_HOST;
1290*4882a593Smuzhiyun 
1291*4882a593Smuzhiyun 	vcpu->stat.sum_exits++;
1292*4882a593Smuzhiyun 
1293*4882a593Smuzhiyun 	/*
1294*4882a593Smuzhiyun 	 * This can happen if an interrupt occurs in the last stages
1295*4882a593Smuzhiyun 	 * of guest entry or the first stages of guest exit (i.e. after
1296*4882a593Smuzhiyun 	 * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV
1297*4882a593Smuzhiyun 	 * and before setting it to KVM_GUEST_MODE_HOST_HV).
1298*4882a593Smuzhiyun 	 * That can happen due to a bug, or due to a machine check
1299*4882a593Smuzhiyun 	 * occurring at just the wrong time.
1300*4882a593Smuzhiyun 	 */
1301*4882a593Smuzhiyun 	if (vcpu->arch.shregs.msr & MSR_HV) {
1302*4882a593Smuzhiyun 		printk(KERN_EMERG "KVM trap in HV mode!\n");
1303*4882a593Smuzhiyun 		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
1304*4882a593Smuzhiyun 			vcpu->arch.trap, kvmppc_get_pc(vcpu),
1305*4882a593Smuzhiyun 			vcpu->arch.shregs.msr);
1306*4882a593Smuzhiyun 		kvmppc_dump_regs(vcpu);
1307*4882a593Smuzhiyun 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1308*4882a593Smuzhiyun 		run->hw.hardware_exit_reason = vcpu->arch.trap;
1309*4882a593Smuzhiyun 		return RESUME_HOST;
1310*4882a593Smuzhiyun 	}
1311*4882a593Smuzhiyun 	run->exit_reason = KVM_EXIT_UNKNOWN;
1312*4882a593Smuzhiyun 	run->ready_for_interrupt_injection = 1;
1313*4882a593Smuzhiyun 	switch (vcpu->arch.trap) {
1314*4882a593Smuzhiyun 	/* We're good on these - the host merely wanted to get our attention */
1315*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_HV_DECREMENTER:
1316*4882a593Smuzhiyun 		vcpu->stat.dec_exits++;
1317*4882a593Smuzhiyun 		r = RESUME_GUEST;
1318*4882a593Smuzhiyun 		break;
1319*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_EXTERNAL:
1320*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_H_DOORBELL:
1321*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_H_VIRT:
1322*4882a593Smuzhiyun 		vcpu->stat.ext_intr_exits++;
1323*4882a593Smuzhiyun 		r = RESUME_GUEST;
1324*4882a593Smuzhiyun 		break;
1325*4882a593Smuzhiyun 	/* SR/HMI/PMI are HV interrupts that the host has handled. Resume guest. */
1326*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_HMI:
1327*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_PERFMON:
1328*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_SYSTEM_RESET:
1329*4882a593Smuzhiyun 		r = RESUME_GUEST;
1330*4882a593Smuzhiyun 		break;
1331*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_MACHINE_CHECK:
1332*4882a593Smuzhiyun 		/* Print the MCE event to host console. */
1333*4882a593Smuzhiyun 		machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
1334*4882a593Smuzhiyun 
1335*4882a593Smuzhiyun 		/*
1336*4882a593Smuzhiyun 		 * If the guest can do FWNMI, exit to userspace so it can
1337*4882a593Smuzhiyun 		 * deliver a FWNMI to the guest.
1338*4882a593Smuzhiyun 		 * Otherwise we synthesize a machine check for the guest
1339*4882a593Smuzhiyun 		 * so that it knows that the machine check occurred.
1340*4882a593Smuzhiyun 		 */
1341*4882a593Smuzhiyun 		if (!vcpu->kvm->arch.fwnmi_enabled) {
1342*4882a593Smuzhiyun 			ulong flags = vcpu->arch.shregs.msr & 0x083c0000;
1343*4882a593Smuzhiyun 			kvmppc_core_queue_machine_check(vcpu, flags);
1344*4882a593Smuzhiyun 			r = RESUME_GUEST;
1345*4882a593Smuzhiyun 			break;
1346*4882a593Smuzhiyun 		}
1347*4882a593Smuzhiyun 
1348*4882a593Smuzhiyun 		/* Exit to userspace with KVM_EXIT_NMI as exit reason */
1349*4882a593Smuzhiyun 		run->exit_reason = KVM_EXIT_NMI;
1350*4882a593Smuzhiyun 		run->hw.hardware_exit_reason = vcpu->arch.trap;
1351*4882a593Smuzhiyun 		/* Clear out the old NMI status from run->flags */
1352*4882a593Smuzhiyun 		run->flags &= ~KVM_RUN_PPC_NMI_DISP_MASK;
1353*4882a593Smuzhiyun 		/* Now set the NMI status */
1354*4882a593Smuzhiyun 		if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED)
1355*4882a593Smuzhiyun 			run->flags |= KVM_RUN_PPC_NMI_DISP_FULLY_RECOV;
1356*4882a593Smuzhiyun 		else
1357*4882a593Smuzhiyun 			run->flags |= KVM_RUN_PPC_NMI_DISP_NOT_RECOV;
1358*4882a593Smuzhiyun 
1359*4882a593Smuzhiyun 		r = RESUME_HOST;
1360*4882a593Smuzhiyun 		break;
1361*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_PROGRAM:
1362*4882a593Smuzhiyun 	{
1363*4882a593Smuzhiyun 		ulong flags;
1364*4882a593Smuzhiyun 		/*
1365*4882a593Smuzhiyun 		 * Normally program interrupts are delivered directly
1366*4882a593Smuzhiyun 		 * to the guest by the hardware, but we can get here
1367*4882a593Smuzhiyun 		 * as a result of a hypervisor emulation interrupt
1368*4882a593Smuzhiyun 		 * (e40) getting turned into a 700 by BML RTAS.
1369*4882a593Smuzhiyun 		 */
1370*4882a593Smuzhiyun 		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
1371*4882a593Smuzhiyun 		kvmppc_core_queue_program(vcpu, flags);
1372*4882a593Smuzhiyun 		r = RESUME_GUEST;
1373*4882a593Smuzhiyun 		break;
1374*4882a593Smuzhiyun 	}
1375*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_SYSCALL:
1376*4882a593Smuzhiyun 	{
1377*4882a593Smuzhiyun 		/* hcall - punt to userspace */
1378*4882a593Smuzhiyun 		int i;
1379*4882a593Smuzhiyun 
1380*4882a593Smuzhiyun 		/* A hypercall made with MSR_PR set has already been handled
1381*4882a593Smuzhiyun 		 * in real mode and never reaches this point.
1382*4882a593Smuzhiyun 		 */
1383*4882a593Smuzhiyun 
1384*4882a593Smuzhiyun 		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
1385*4882a593Smuzhiyun 		for (i = 0; i < 9; ++i)
1386*4882a593Smuzhiyun 			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
1387*4882a593Smuzhiyun 		run->exit_reason = KVM_EXIT_PAPR_HCALL;
1388*4882a593Smuzhiyun 		vcpu->arch.hcall_needed = 1;
1389*4882a593Smuzhiyun 		r = RESUME_HOST;
1390*4882a593Smuzhiyun 		break;
1391*4882a593Smuzhiyun 	}
1392*4882a593Smuzhiyun 	/*
1393*4882a593Smuzhiyun 	 * We get these next two if the guest accesses a page which it thinks
1394*4882a593Smuzhiyun 	 * it has mapped but which is not actually present, either because
1395*4882a593Smuzhiyun 	 * it is for an emulated I/O device or because the corresponding
1396*4882a593Smuzhiyun 	 * host page has been paged out.  Any other HDSI/HISI interrupts
1397*4882a593Smuzhiyun 	 * have been handled already.
1398*4882a593Smuzhiyun 	 */
1399*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
1400*4882a593Smuzhiyun 		r = RESUME_PAGE_FAULT;
1401*4882a593Smuzhiyun 		break;
1402*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_H_INST_STORAGE:
1403*4882a593Smuzhiyun 		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
1404*4882a593Smuzhiyun 		vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
1405*4882a593Smuzhiyun 			DSISR_SRR1_MATCH_64S;
1406*4882a593Smuzhiyun 		if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
1407*4882a593Smuzhiyun 			vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
1408*4882a593Smuzhiyun 		r = RESUME_PAGE_FAULT;
1409*4882a593Smuzhiyun 		break;
1410*4882a593Smuzhiyun 	/*
1411*4882a593Smuzhiyun 	 * This occurs if the guest executes an illegal instruction.
1412*4882a593Smuzhiyun 	 * If guest debug is disabled, generate a program interrupt
1413*4882a593Smuzhiyun 	 * to the guest. If guest debug is enabled, we need to check
1414*4882a593Smuzhiyun 	 * whether the instruction is a software breakpoint instruction,
1415*4882a593Smuzhiyun 	 * and accordingly return to the guest or to the host.
1416*4882a593Smuzhiyun 	 */
1417*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
1418*4882a593Smuzhiyun 		if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED)
1419*4882a593Smuzhiyun 			vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ?
1420*4882a593Smuzhiyun 				swab32(vcpu->arch.emul_inst) :
1421*4882a593Smuzhiyun 				vcpu->arch.emul_inst;
1422*4882a593Smuzhiyun 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
1423*4882a593Smuzhiyun 			r = kvmppc_emulate_debug_inst(vcpu);
1424*4882a593Smuzhiyun 		} else {
1425*4882a593Smuzhiyun 			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
1426*4882a593Smuzhiyun 			r = RESUME_GUEST;
1427*4882a593Smuzhiyun 		}
1428*4882a593Smuzhiyun 		break;
1429*4882a593Smuzhiyun 	/*
1430*4882a593Smuzhiyun 	 * This occurs if the guest (kernel or userspace) does something that
1431*4882a593Smuzhiyun 	 * is prohibited by HFSCR.
1432*4882a593Smuzhiyun 	 * On POWER9, this could be a doorbell instruction that we need
1433*4882a593Smuzhiyun 	 * to emulate.
1434*4882a593Smuzhiyun 	 * Otherwise, we just generate a program interrupt to the guest.
1435*4882a593Smuzhiyun 	 */
1436*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
1437*4882a593Smuzhiyun 		r = EMULATE_FAIL;
1438*4882a593Smuzhiyun 		if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) &&
1439*4882a593Smuzhiyun 		    cpu_has_feature(CPU_FTR_ARCH_300))
1440*4882a593Smuzhiyun 			r = kvmppc_emulate_doorbell_instr(vcpu);
1441*4882a593Smuzhiyun 		if (r == EMULATE_FAIL) {
1442*4882a593Smuzhiyun 			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
1443*4882a593Smuzhiyun 			r = RESUME_GUEST;
1444*4882a593Smuzhiyun 		}
1445*4882a593Smuzhiyun 		break;
1446*4882a593Smuzhiyun 
1447*4882a593Smuzhiyun #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1448*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_HV_SOFTPATCH:
1449*4882a593Smuzhiyun 		/*
1450*4882a593Smuzhiyun 		 * This occurs for various TM-related instructions that
1451*4882a593Smuzhiyun 		 * we need to emulate on POWER9 DD2.2.  We have already
1452*4882a593Smuzhiyun 		 * handled the cases where the guest was in real-suspend
1453*4882a593Smuzhiyun 		 * mode and was transitioning to transactional state.
1454*4882a593Smuzhiyun 		 */
1455*4882a593Smuzhiyun 		r = kvmhv_p9_tm_emulation(vcpu);
1456*4882a593Smuzhiyun 		break;
1457*4882a593Smuzhiyun #endif
1458*4882a593Smuzhiyun 
1459*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_HV_RM_HARD:
1460*4882a593Smuzhiyun 		r = RESUME_PASSTHROUGH;
1461*4882a593Smuzhiyun 		break;
1462*4882a593Smuzhiyun 	default:
1463*4882a593Smuzhiyun 		kvmppc_dump_regs(vcpu);
1464*4882a593Smuzhiyun 		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
1465*4882a593Smuzhiyun 			vcpu->arch.trap, kvmppc_get_pc(vcpu),
1466*4882a593Smuzhiyun 			vcpu->arch.shregs.msr);
1467*4882a593Smuzhiyun 		run->hw.hardware_exit_reason = vcpu->arch.trap;
1468*4882a593Smuzhiyun 		r = RESUME_HOST;
1469*4882a593Smuzhiyun 		break;
1470*4882a593Smuzhiyun 	}
1471*4882a593Smuzhiyun 
1472*4882a593Smuzhiyun 	return r;
1473*4882a593Smuzhiyun }
1474*4882a593Smuzhiyun 
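/*
 * As above, but for exits taken while running a nested (L2) guest.
 * Page faults are resolved via kvmhv_nested_page_fault(); exits we
 * don't handle here resume the host so they can be reflected to the
 * L1 hypervisor.
 */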
1475*4882a593Smuzhiyun static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
1476*4882a593Smuzhiyun {
1477*4882a593Smuzhiyun 	int r;
1478*4882a593Smuzhiyun 	int srcu_idx;
1479*4882a593Smuzhiyun 
1480*4882a593Smuzhiyun 	vcpu->stat.sum_exits++;
1481*4882a593Smuzhiyun 
1482*4882a593Smuzhiyun 	/*
1483*4882a593Smuzhiyun 	 * This can happen if an interrupt occurs in the last stages
1484*4882a593Smuzhiyun 	 * of guest entry or the first stages of guest exit (i.e. after
1485*4882a593Smuzhiyun 	 * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV
1486*4882a593Smuzhiyun 	 * and before setting it to KVM_GUEST_MODE_HOST_HV).
1487*4882a593Smuzhiyun 	 * That can happen due to a bug, or due to a machine check
1488*4882a593Smuzhiyun 	 * occurring at just the wrong time.
1489*4882a593Smuzhiyun 	 */
1490*4882a593Smuzhiyun 	if (vcpu->arch.shregs.msr & MSR_HV) {
1491*4882a593Smuzhiyun 		pr_emerg("KVM trap in HV mode while nested!\n");
1492*4882a593Smuzhiyun 		pr_emerg("trap=0x%x | pc=0x%lx | msr=0x%llx\n",
1493*4882a593Smuzhiyun 			 vcpu->arch.trap, kvmppc_get_pc(vcpu),
1494*4882a593Smuzhiyun 			 vcpu->arch.shregs.msr);
1495*4882a593Smuzhiyun 		kvmppc_dump_regs(vcpu);
1496*4882a593Smuzhiyun 		return RESUME_HOST;
1497*4882a593Smuzhiyun 	}
1498*4882a593Smuzhiyun 	switch (vcpu->arch.trap) {
1499*4882a593Smuzhiyun 	/* We're good on these - the host merely wanted to get our attention */
1500*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_HV_DECREMENTER:
1501*4882a593Smuzhiyun 		vcpu->stat.dec_exits++;
1502*4882a593Smuzhiyun 		r = RESUME_GUEST;
1503*4882a593Smuzhiyun 		break;
1504*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_EXTERNAL:
1505*4882a593Smuzhiyun 		vcpu->stat.ext_intr_exits++;
1506*4882a593Smuzhiyun 		r = RESUME_HOST;
1507*4882a593Smuzhiyun 		break;
1508*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_H_DOORBELL:
1509*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_H_VIRT:
1510*4882a593Smuzhiyun 		vcpu->stat.ext_intr_exits++;
1511*4882a593Smuzhiyun 		r = RESUME_GUEST;
1512*4882a593Smuzhiyun 		break;
1513*4882a593Smuzhiyun 	/* SR/HMI/PMI are HV interrupts that the host has handled. Resume guest. */
1514*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_HMI:
1515*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_PERFMON:
1516*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_SYSTEM_RESET:
1517*4882a593Smuzhiyun 		r = RESUME_GUEST;
1518*4882a593Smuzhiyun 		break;
1519*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_MACHINE_CHECK:
1520*4882a593Smuzhiyun 		/* Pass the machine check to the L1 guest */
1521*4882a593Smuzhiyun 		r = RESUME_HOST;
1522*4882a593Smuzhiyun 		/* Print the MCE event to host console. */
1523*4882a593Smuzhiyun 		machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
1524*4882a593Smuzhiyun 		break;
1525*4882a593Smuzhiyun 	/*
1526*4882a593Smuzhiyun 	 * We get these next two if the guest accesses a page which it thinks
1527*4882a593Smuzhiyun 	 * it has mapped but which is not actually present, either because
1528*4882a593Smuzhiyun 	 * it is for an emulated I/O device or because the corresponding
1529*4882a593Smuzhiyun 	 * host page has been paged out.
1530*4882a593Smuzhiyun 	 */
1531*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
1532*4882a593Smuzhiyun 		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1533*4882a593Smuzhiyun 		r = kvmhv_nested_page_fault(vcpu);
1534*4882a593Smuzhiyun 		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
1535*4882a593Smuzhiyun 		break;
1536*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_H_INST_STORAGE:
1537*4882a593Smuzhiyun 		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
1538*4882a593Smuzhiyun 		vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) &
1539*4882a593Smuzhiyun 					 DSISR_SRR1_MATCH_64S;
1540*4882a593Smuzhiyun 		if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
1541*4882a593Smuzhiyun 			vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
1542*4882a593Smuzhiyun 		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1543*4882a593Smuzhiyun 		r = kvmhv_nested_page_fault(vcpu);
1544*4882a593Smuzhiyun 		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
1545*4882a593Smuzhiyun 		break;
1546*4882a593Smuzhiyun 
1547*4882a593Smuzhiyun #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1548*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_HV_SOFTPATCH:
1549*4882a593Smuzhiyun 		/*
1550*4882a593Smuzhiyun 		 * This occurs for various TM-related instructions that
1551*4882a593Smuzhiyun 		 * we need to emulate on POWER9 DD2.2.  We have already
1552*4882a593Smuzhiyun 		 * handled the cases where the guest was in real-suspend
1553*4882a593Smuzhiyun 		 * mode and was transitioning to transactional state.
1554*4882a593Smuzhiyun 		 */
1555*4882a593Smuzhiyun 		r = kvmhv_p9_tm_emulation(vcpu);
1556*4882a593Smuzhiyun 		break;
1557*4882a593Smuzhiyun #endif
1558*4882a593Smuzhiyun 
1559*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_HV_RM_HARD:
1560*4882a593Smuzhiyun 		vcpu->arch.trap = 0;
1561*4882a593Smuzhiyun 		r = RESUME_GUEST;
1562*4882a593Smuzhiyun 		if (!xics_on_xive())
1563*4882a593Smuzhiyun 			kvmppc_xics_rm_complete(vcpu, 0);
1564*4882a593Smuzhiyun 		break;
1565*4882a593Smuzhiyun 	default:
1566*4882a593Smuzhiyun 		r = RESUME_HOST;
1567*4882a593Smuzhiyun 		break;
1568*4882a593Smuzhiyun 	}
1569*4882a593Smuzhiyun 
1570*4882a593Smuzhiyun 	return r;
1571*4882a593Smuzhiyun }
1572*4882a593Smuzhiyun 
1573*4882a593Smuzhiyun static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
1574*4882a593Smuzhiyun 					    struct kvm_sregs *sregs)
1575*4882a593Smuzhiyun {
1576*4882a593Smuzhiyun 	int i;
1577*4882a593Smuzhiyun 
1578*4882a593Smuzhiyun 	memset(sregs, 0, sizeof(struct kvm_sregs));
1579*4882a593Smuzhiyun 	sregs->pvr = vcpu->arch.pvr;
1580*4882a593Smuzhiyun 	for (i = 0; i < vcpu->arch.slb_max; i++) {
1581*4882a593Smuzhiyun 		sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
1582*4882a593Smuzhiyun 		sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
1583*4882a593Smuzhiyun 	}
1584*4882a593Smuzhiyun 
1585*4882a593Smuzhiyun 	return 0;
1586*4882a593Smuzhiyun }
1587*4882a593Smuzhiyun 
1588*4882a593Smuzhiyun static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
1589*4882a593Smuzhiyun 					    struct kvm_sregs *sregs)
1590*4882a593Smuzhiyun {
1591*4882a593Smuzhiyun 	int i, j;
1592*4882a593Smuzhiyun 
1593*4882a593Smuzhiyun 	/* Only accept the same PVR as the host's, since we can't spoof it */
1594*4882a593Smuzhiyun 	if (sregs->pvr != vcpu->arch.pvr)
1595*4882a593Smuzhiyun 		return -EINVAL;
1596*4882a593Smuzhiyun 
1597*4882a593Smuzhiyun 	j = 0;
1598*4882a593Smuzhiyun 	for (i = 0; i < vcpu->arch.slb_nr; i++) {
1599*4882a593Smuzhiyun 		if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
1600*4882a593Smuzhiyun 			vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
1601*4882a593Smuzhiyun 			vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
1602*4882a593Smuzhiyun 			++j;
1603*4882a593Smuzhiyun 		}
1604*4882a593Smuzhiyun 	}
1605*4882a593Smuzhiyun 	vcpu->arch.slb_max = j;
1606*4882a593Smuzhiyun 
1607*4882a593Smuzhiyun 	return 0;
1608*4882a593Smuzhiyun }
1609*4882a593Smuzhiyun 
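/*
 * Update the vcore's LPCR under vc->lock.  Userspace may only change
 * the bits in the mask computed below (DPFD, ILE, TC, plus AIL and LD
 * where supported); a change to ILE is also propagated into each
 * affected vcpu's interrupt MSR.
 */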
1610*4882a593Smuzhiyun static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
1611*4882a593Smuzhiyun 		bool preserve_top32)
1612*4882a593Smuzhiyun {
1613*4882a593Smuzhiyun 	struct kvm *kvm = vcpu->kvm;
1614*4882a593Smuzhiyun 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
1615*4882a593Smuzhiyun 	u64 mask;
1616*4882a593Smuzhiyun 
1617*4882a593Smuzhiyun 	spin_lock(&vc->lock);
1618*4882a593Smuzhiyun 	/*
1619*4882a593Smuzhiyun 	 * If ILE (interrupt little-endian) has changed, update the
1620*4882a593Smuzhiyun 	 * MSR_LE bit in the intr_msr for each vcpu in this vcore.
1621*4882a593Smuzhiyun 	 */
1622*4882a593Smuzhiyun 	if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
1623*4882a593Smuzhiyun 		struct kvm_vcpu *vcpu;
1624*4882a593Smuzhiyun 		int i;
1625*4882a593Smuzhiyun 
1626*4882a593Smuzhiyun 		kvm_for_each_vcpu(i, vcpu, kvm) {
1627*4882a593Smuzhiyun 			if (vcpu->arch.vcore != vc)
1628*4882a593Smuzhiyun 				continue;
1629*4882a593Smuzhiyun 			if (new_lpcr & LPCR_ILE)
1630*4882a593Smuzhiyun 				vcpu->arch.intr_msr |= MSR_LE;
1631*4882a593Smuzhiyun 			else
1632*4882a593Smuzhiyun 				vcpu->arch.intr_msr &= ~MSR_LE;
1633*4882a593Smuzhiyun 		}
1634*4882a593Smuzhiyun 	}
1635*4882a593Smuzhiyun 
1636*4882a593Smuzhiyun 	/*
1637*4882a593Smuzhiyun 	 * Userspace can only modify DPFD (default prefetch depth),
1638*4882a593Smuzhiyun 	 * ILE (interrupt little-endian) and TC (translation control).
1639*4882a593Smuzhiyun 	 * On POWER8 and POWER9 userspace can also modify AIL (alt. interrupt loc.).
1640*4882a593Smuzhiyun 	 */
1641*4882a593Smuzhiyun 	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
1642*4882a593Smuzhiyun 	if (cpu_has_feature(CPU_FTR_ARCH_207S))
1643*4882a593Smuzhiyun 		mask |= LPCR_AIL;
1644*4882a593Smuzhiyun 	/*
1645*4882a593Smuzhiyun 	 * On POWER9, allow userspace to enable large decrementer for the
1646*4882a593Smuzhiyun 	 * guest, whether or not the host has it enabled.
1647*4882a593Smuzhiyun 	 */
1648*4882a593Smuzhiyun 	if (cpu_has_feature(CPU_FTR_ARCH_300))
1649*4882a593Smuzhiyun 		mask |= LPCR_LD;
1650*4882a593Smuzhiyun 
1651*4882a593Smuzhiyun 	/* Broken 32-bit version of LPCR must not clear top bits */
1652*4882a593Smuzhiyun 	if (preserve_top32)
1653*4882a593Smuzhiyun 		mask &= 0xFFFFFFFF;
1654*4882a593Smuzhiyun 	vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
1655*4882a593Smuzhiyun 	spin_unlock(&vc->lock);
1656*4882a593Smuzhiyun }
1657*4882a593Smuzhiyun 
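/* Read the HV-specific ONE_REG register identified by @id into @val. */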
1658*4882a593Smuzhiyun static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
1659*4882a593Smuzhiyun 				 union kvmppc_one_reg *val)
1660*4882a593Smuzhiyun {
1661*4882a593Smuzhiyun 	int r = 0;
1662*4882a593Smuzhiyun 	long int i;
1663*4882a593Smuzhiyun 
1664*4882a593Smuzhiyun 	switch (id) {
1665*4882a593Smuzhiyun 	case KVM_REG_PPC_DEBUG_INST:
1666*4882a593Smuzhiyun 		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
1667*4882a593Smuzhiyun 		break;
1668*4882a593Smuzhiyun 	case KVM_REG_PPC_HIOR:
1669*4882a593Smuzhiyun 		*val = get_reg_val(id, 0);
1670*4882a593Smuzhiyun 		break;
1671*4882a593Smuzhiyun 	case KVM_REG_PPC_DABR:
1672*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.dabr);
1673*4882a593Smuzhiyun 		break;
1674*4882a593Smuzhiyun 	case KVM_REG_PPC_DABRX:
1675*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.dabrx);
1676*4882a593Smuzhiyun 		break;
1677*4882a593Smuzhiyun 	case KVM_REG_PPC_DSCR:
1678*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.dscr);
1679*4882a593Smuzhiyun 		break;
1680*4882a593Smuzhiyun 	case KVM_REG_PPC_PURR:
1681*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.purr);
1682*4882a593Smuzhiyun 		break;
1683*4882a593Smuzhiyun 	case KVM_REG_PPC_SPURR:
1684*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.spurr);
1685*4882a593Smuzhiyun 		break;
1686*4882a593Smuzhiyun 	case KVM_REG_PPC_AMR:
1687*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.amr);
1688*4882a593Smuzhiyun 		break;
1689*4882a593Smuzhiyun 	case KVM_REG_PPC_UAMOR:
1690*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.uamor);
1691*4882a593Smuzhiyun 		break;
1692*4882a593Smuzhiyun 	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
1693*4882a593Smuzhiyun 		i = id - KVM_REG_PPC_MMCR0;
1694*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.mmcr[i]);
1695*4882a593Smuzhiyun 		break;
1696*4882a593Smuzhiyun 	case KVM_REG_PPC_MMCR2:
1697*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.mmcr[2]);
1698*4882a593Smuzhiyun 		break;
1699*4882a593Smuzhiyun 	case KVM_REG_PPC_MMCRA:
1700*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.mmcra);
1701*4882a593Smuzhiyun 		break;
1702*4882a593Smuzhiyun 	case KVM_REG_PPC_MMCRS:
1703*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.mmcrs);
1704*4882a593Smuzhiyun 		break;
1705*4882a593Smuzhiyun 	case KVM_REG_PPC_MMCR3:
1706*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.mmcr[3]);
1707*4882a593Smuzhiyun 		break;
1708*4882a593Smuzhiyun 	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
1709*4882a593Smuzhiyun 		i = id - KVM_REG_PPC_PMC1;
1710*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.pmc[i]);
1711*4882a593Smuzhiyun 		break;
1712*4882a593Smuzhiyun 	case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
1713*4882a593Smuzhiyun 		i = id - KVM_REG_PPC_SPMC1;
1714*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.spmc[i]);
1715*4882a593Smuzhiyun 		break;
1716*4882a593Smuzhiyun 	case KVM_REG_PPC_SIAR:
1717*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.siar);
1718*4882a593Smuzhiyun 		break;
1719*4882a593Smuzhiyun 	case KVM_REG_PPC_SDAR:
1720*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.sdar);
1721*4882a593Smuzhiyun 		break;
1722*4882a593Smuzhiyun 	case KVM_REG_PPC_SIER:
1723*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.sier[0]);
1724*4882a593Smuzhiyun 		break;
1725*4882a593Smuzhiyun 	case KVM_REG_PPC_SIER2:
1726*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.sier[1]);
1727*4882a593Smuzhiyun 		break;
1728*4882a593Smuzhiyun 	case KVM_REG_PPC_SIER3:
1729*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.sier[2]);
1730*4882a593Smuzhiyun 		break;
1731*4882a593Smuzhiyun 	case KVM_REG_PPC_IAMR:
1732*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.iamr);
1733*4882a593Smuzhiyun 		break;
1734*4882a593Smuzhiyun 	case KVM_REG_PPC_PSPB:
1735*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.pspb);
1736*4882a593Smuzhiyun 		break;
1737*4882a593Smuzhiyun 	case KVM_REG_PPC_DPDES:
1738*4882a593Smuzhiyun 		/*
1739*4882a593Smuzhiyun 		 * On POWER9, where we are emulating msgsndp etc.,
1740*4882a593Smuzhiyun 		 * we return 1 bit for each vcpu, which can come from
1741*4882a593Smuzhiyun 		 * either vcore->dpdes or doorbell_request.
1742*4882a593Smuzhiyun 		 * On POWER8, doorbell_request is 0.
1743*4882a593Smuzhiyun 		 */
1744*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.vcore->dpdes |
1745*4882a593Smuzhiyun 				   vcpu->arch.doorbell_request);
1746*4882a593Smuzhiyun 		break;
1747*4882a593Smuzhiyun 	case KVM_REG_PPC_VTB:
1748*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.vcore->vtb);
1749*4882a593Smuzhiyun 		break;
1750*4882a593Smuzhiyun 	case KVM_REG_PPC_DAWR:
1751*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.dawr);
1752*4882a593Smuzhiyun 		break;
1753*4882a593Smuzhiyun 	case KVM_REG_PPC_DAWRX:
1754*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.dawrx);
1755*4882a593Smuzhiyun 		break;
1756*4882a593Smuzhiyun 	case KVM_REG_PPC_CIABR:
1757*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.ciabr);
1758*4882a593Smuzhiyun 		break;
1759*4882a593Smuzhiyun 	case KVM_REG_PPC_CSIGR:
1760*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.csigr);
1761*4882a593Smuzhiyun 		break;
1762*4882a593Smuzhiyun 	case KVM_REG_PPC_TACR:
1763*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.tacr);
1764*4882a593Smuzhiyun 		break;
1765*4882a593Smuzhiyun 	case KVM_REG_PPC_TCSCR:
1766*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.tcscr);
1767*4882a593Smuzhiyun 		break;
1768*4882a593Smuzhiyun 	case KVM_REG_PPC_PID:
1769*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.pid);
1770*4882a593Smuzhiyun 		break;
1771*4882a593Smuzhiyun 	case KVM_REG_PPC_ACOP:
1772*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.acop);
1773*4882a593Smuzhiyun 		break;
1774*4882a593Smuzhiyun 	case KVM_REG_PPC_WORT:
1775*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.wort);
1776*4882a593Smuzhiyun 		break;
1777*4882a593Smuzhiyun 	case KVM_REG_PPC_TIDR:
1778*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.tid);
1779*4882a593Smuzhiyun 		break;
1780*4882a593Smuzhiyun 	case KVM_REG_PPC_PSSCR:
1781*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.psscr);
1782*4882a593Smuzhiyun 		break;
1783*4882a593Smuzhiyun 	case KVM_REG_PPC_VPA_ADDR:
1784*4882a593Smuzhiyun 		spin_lock(&vcpu->arch.vpa_update_lock);
1785*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
1786*4882a593Smuzhiyun 		spin_unlock(&vcpu->arch.vpa_update_lock);
1787*4882a593Smuzhiyun 		break;
1788*4882a593Smuzhiyun 	case KVM_REG_PPC_VPA_SLB:
1789*4882a593Smuzhiyun 		spin_lock(&vcpu->arch.vpa_update_lock);
1790*4882a593Smuzhiyun 		val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
1791*4882a593Smuzhiyun 		val->vpaval.length = vcpu->arch.slb_shadow.len;
1792*4882a593Smuzhiyun 		spin_unlock(&vcpu->arch.vpa_update_lock);
1793*4882a593Smuzhiyun 		break;
1794*4882a593Smuzhiyun 	case KVM_REG_PPC_VPA_DTL:
1795*4882a593Smuzhiyun 		spin_lock(&vcpu->arch.vpa_update_lock);
1796*4882a593Smuzhiyun 		val->vpaval.addr = vcpu->arch.dtl.next_gpa;
1797*4882a593Smuzhiyun 		val->vpaval.length = vcpu->arch.dtl.len;
1798*4882a593Smuzhiyun 		spin_unlock(&vcpu->arch.vpa_update_lock);
1799*4882a593Smuzhiyun 		break;
1800*4882a593Smuzhiyun 	case KVM_REG_PPC_TB_OFFSET:
1801*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
1802*4882a593Smuzhiyun 		break;
1803*4882a593Smuzhiyun 	case KVM_REG_PPC_LPCR:
1804*4882a593Smuzhiyun 	case KVM_REG_PPC_LPCR_64:
1805*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.vcore->lpcr);
1806*4882a593Smuzhiyun 		break;
1807*4882a593Smuzhiyun 	case KVM_REG_PPC_PPR:
1808*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.ppr);
1809*4882a593Smuzhiyun 		break;
1810*4882a593Smuzhiyun #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1811*4882a593Smuzhiyun 	case KVM_REG_PPC_TFHAR:
1812*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.tfhar);
1813*4882a593Smuzhiyun 		break;
1814*4882a593Smuzhiyun 	case KVM_REG_PPC_TFIAR:
1815*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.tfiar);
1816*4882a593Smuzhiyun 		break;
1817*4882a593Smuzhiyun 	case KVM_REG_PPC_TEXASR:
1818*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.texasr);
1819*4882a593Smuzhiyun 		break;
1820*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
1821*4882a593Smuzhiyun 		i = id - KVM_REG_PPC_TM_GPR0;
1822*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.gpr_tm[i]);
1823*4882a593Smuzhiyun 		break;
1824*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
1825*4882a593Smuzhiyun 	{
1826*4882a593Smuzhiyun 		int j;
1827*4882a593Smuzhiyun 		i = id - KVM_REG_PPC_TM_VSR0;
1828*4882a593Smuzhiyun 		if (i < 32)
1829*4882a593Smuzhiyun 			for (j = 0; j < TS_FPRWIDTH; j++)
1830*4882a593Smuzhiyun 				val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
1831*4882a593Smuzhiyun 		else {
1832*4882a593Smuzhiyun 			if (cpu_has_feature(CPU_FTR_ALTIVEC))
1833*4882a593Smuzhiyun 				val->vval = vcpu->arch.vr_tm.vr[i-32];
1834*4882a593Smuzhiyun 			else
1835*4882a593Smuzhiyun 				r = -ENXIO;
1836*4882a593Smuzhiyun 		}
1837*4882a593Smuzhiyun 		break;
1838*4882a593Smuzhiyun 	}
1839*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_CR:
1840*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.cr_tm);
1841*4882a593Smuzhiyun 		break;
1842*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_XER:
1843*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.xer_tm);
1844*4882a593Smuzhiyun 		break;
1845*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_LR:
1846*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.lr_tm);
1847*4882a593Smuzhiyun 		break;
1848*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_CTR:
1849*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.ctr_tm);
1850*4882a593Smuzhiyun 		break;
1851*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_FPSCR:
1852*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
1853*4882a593Smuzhiyun 		break;
1854*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_AMR:
1855*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.amr_tm);
1856*4882a593Smuzhiyun 		break;
1857*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_PPR:
1858*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.ppr_tm);
1859*4882a593Smuzhiyun 		break;
1860*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_VRSAVE:
1861*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.vrsave_tm);
1862*4882a593Smuzhiyun 		break;
1863*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_VSCR:
1864*4882a593Smuzhiyun 		if (cpu_has_feature(CPU_FTR_ALTIVEC))
1865*4882a593Smuzhiyun 			*val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
1866*4882a593Smuzhiyun 		else
1867*4882a593Smuzhiyun 			r = -ENXIO;
1868*4882a593Smuzhiyun 		break;
1869*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_DSCR:
1870*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.dscr_tm);
1871*4882a593Smuzhiyun 		break;
1872*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_TAR:
1873*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.tar_tm);
1874*4882a593Smuzhiyun 		break;
1875*4882a593Smuzhiyun #endif
1876*4882a593Smuzhiyun 	case KVM_REG_PPC_ARCH_COMPAT:
1877*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
1878*4882a593Smuzhiyun 		break;
1879*4882a593Smuzhiyun 	case KVM_REG_PPC_DEC_EXPIRY:
1880*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.dec_expires +
1881*4882a593Smuzhiyun 				   vcpu->arch.vcore->tb_offset);
1882*4882a593Smuzhiyun 		break;
1883*4882a593Smuzhiyun 	case KVM_REG_PPC_ONLINE:
1884*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->arch.online);
1885*4882a593Smuzhiyun 		break;
1886*4882a593Smuzhiyun 	case KVM_REG_PPC_PTCR:
1887*4882a593Smuzhiyun 		*val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr);
1888*4882a593Smuzhiyun 		break;
1889*4882a593Smuzhiyun 	default:
1890*4882a593Smuzhiyun 		r = -EINVAL;
1891*4882a593Smuzhiyun 		break;
1892*4882a593Smuzhiyun 	}
1893*4882a593Smuzhiyun 
1894*4882a593Smuzhiyun 	return r;
1895*4882a593Smuzhiyun }
1896*4882a593Smuzhiyun 
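/* Write @val to the HV-specific ONE_REG register identified by @id. */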
1897*4882a593Smuzhiyun static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
1898*4882a593Smuzhiyun 				 union kvmppc_one_reg *val)
1899*4882a593Smuzhiyun {
1900*4882a593Smuzhiyun 	int r = 0;
1901*4882a593Smuzhiyun 	long int i;
1902*4882a593Smuzhiyun 	unsigned long addr, len;
1903*4882a593Smuzhiyun 
1904*4882a593Smuzhiyun 	switch (id) {
1905*4882a593Smuzhiyun 	case KVM_REG_PPC_HIOR:
1906*4882a593Smuzhiyun 		/* Only allow this to be set to zero */
1907*4882a593Smuzhiyun 		if (set_reg_val(id, *val))
1908*4882a593Smuzhiyun 			r = -EINVAL;
1909*4882a593Smuzhiyun 		break;
1910*4882a593Smuzhiyun 	case KVM_REG_PPC_DABR:
1911*4882a593Smuzhiyun 		vcpu->arch.dabr = set_reg_val(id, *val);
1912*4882a593Smuzhiyun 		break;
1913*4882a593Smuzhiyun 	case KVM_REG_PPC_DABRX:
1914*4882a593Smuzhiyun 		vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
1915*4882a593Smuzhiyun 		break;
1916*4882a593Smuzhiyun 	case KVM_REG_PPC_DSCR:
1917*4882a593Smuzhiyun 		vcpu->arch.dscr = set_reg_val(id, *val);
1918*4882a593Smuzhiyun 		break;
1919*4882a593Smuzhiyun 	case KVM_REG_PPC_PURR:
1920*4882a593Smuzhiyun 		vcpu->arch.purr = set_reg_val(id, *val);
1921*4882a593Smuzhiyun 		break;
1922*4882a593Smuzhiyun 	case KVM_REG_PPC_SPURR:
1923*4882a593Smuzhiyun 		vcpu->arch.spurr = set_reg_val(id, *val);
1924*4882a593Smuzhiyun 		break;
1925*4882a593Smuzhiyun 	case KVM_REG_PPC_AMR:
1926*4882a593Smuzhiyun 		vcpu->arch.amr = set_reg_val(id, *val);
1927*4882a593Smuzhiyun 		break;
1928*4882a593Smuzhiyun 	case KVM_REG_PPC_UAMOR:
1929*4882a593Smuzhiyun 		vcpu->arch.uamor = set_reg_val(id, *val);
1930*4882a593Smuzhiyun 		break;
1931*4882a593Smuzhiyun 	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
1932*4882a593Smuzhiyun 		i = id - KVM_REG_PPC_MMCR0;
1933*4882a593Smuzhiyun 		vcpu->arch.mmcr[i] = set_reg_val(id, *val);
1934*4882a593Smuzhiyun 		break;
1935*4882a593Smuzhiyun 	case KVM_REG_PPC_MMCR2:
1936*4882a593Smuzhiyun 		vcpu->arch.mmcr[2] = set_reg_val(id, *val);
1937*4882a593Smuzhiyun 		break;
1938*4882a593Smuzhiyun 	case KVM_REG_PPC_MMCRA:
1939*4882a593Smuzhiyun 		vcpu->arch.mmcra = set_reg_val(id, *val);
1940*4882a593Smuzhiyun 		break;
1941*4882a593Smuzhiyun 	case KVM_REG_PPC_MMCRS:
1942*4882a593Smuzhiyun 		vcpu->arch.mmcrs = set_reg_val(id, *val);
1943*4882a593Smuzhiyun 		break;
1944*4882a593Smuzhiyun 	case KVM_REG_PPC_MMCR3:
1945*4882a593Smuzhiyun 		vcpu->arch.mmcr[3] = set_reg_val(id, *val);
1946*4882a593Smuzhiyun 		break;
1947*4882a593Smuzhiyun 	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
1948*4882a593Smuzhiyun 		i = id - KVM_REG_PPC_PMC1;
1949*4882a593Smuzhiyun 		vcpu->arch.pmc[i] = set_reg_val(id, *val);
1950*4882a593Smuzhiyun 		break;
1951*4882a593Smuzhiyun 	case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
1952*4882a593Smuzhiyun 		i = id - KVM_REG_PPC_SPMC1;
1953*4882a593Smuzhiyun 		vcpu->arch.spmc[i] = set_reg_val(id, *val);
1954*4882a593Smuzhiyun 		break;
1955*4882a593Smuzhiyun 	case KVM_REG_PPC_SIAR:
1956*4882a593Smuzhiyun 		vcpu->arch.siar = set_reg_val(id, *val);
1957*4882a593Smuzhiyun 		break;
1958*4882a593Smuzhiyun 	case KVM_REG_PPC_SDAR:
1959*4882a593Smuzhiyun 		vcpu->arch.sdar = set_reg_val(id, *val);
1960*4882a593Smuzhiyun 		break;
1961*4882a593Smuzhiyun 	case KVM_REG_PPC_SIER:
1962*4882a593Smuzhiyun 		vcpu->arch.sier[0] = set_reg_val(id, *val);
1963*4882a593Smuzhiyun 		break;
1964*4882a593Smuzhiyun 	case KVM_REG_PPC_SIER2:
1965*4882a593Smuzhiyun 		vcpu->arch.sier[1] = set_reg_val(id, *val);
1966*4882a593Smuzhiyun 		break;
1967*4882a593Smuzhiyun 	case KVM_REG_PPC_SIER3:
1968*4882a593Smuzhiyun 		vcpu->arch.sier[2] = set_reg_val(id, *val);
1969*4882a593Smuzhiyun 		break;
1970*4882a593Smuzhiyun 	case KVM_REG_PPC_IAMR:
1971*4882a593Smuzhiyun 		vcpu->arch.iamr = set_reg_val(id, *val);
1972*4882a593Smuzhiyun 		break;
1973*4882a593Smuzhiyun 	case KVM_REG_PPC_PSPB:
1974*4882a593Smuzhiyun 		vcpu->arch.pspb = set_reg_val(id, *val);
1975*4882a593Smuzhiyun 		break;
1976*4882a593Smuzhiyun 	case KVM_REG_PPC_DPDES:
1977*4882a593Smuzhiyun 		vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
1978*4882a593Smuzhiyun 		break;
1979*4882a593Smuzhiyun 	case KVM_REG_PPC_VTB:
1980*4882a593Smuzhiyun 		vcpu->arch.vcore->vtb = set_reg_val(id, *val);
1981*4882a593Smuzhiyun 		break;
1982*4882a593Smuzhiyun 	case KVM_REG_PPC_DAWR:
1983*4882a593Smuzhiyun 		vcpu->arch.dawr = set_reg_val(id, *val);
1984*4882a593Smuzhiyun 		break;
1985*4882a593Smuzhiyun 	case KVM_REG_PPC_DAWRX:
1986*4882a593Smuzhiyun 		vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP;
1987*4882a593Smuzhiyun 		break;
1988*4882a593Smuzhiyun 	case KVM_REG_PPC_CIABR:
1989*4882a593Smuzhiyun 		vcpu->arch.ciabr = set_reg_val(id, *val);
1990*4882a593Smuzhiyun 		/* Don't allow setting breakpoints in hypervisor code */
1991*4882a593Smuzhiyun 		if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
1992*4882a593Smuzhiyun 			vcpu->arch.ciabr &= ~CIABR_PRIV;	/* disable */
1993*4882a593Smuzhiyun 		break;
1994*4882a593Smuzhiyun 	case KVM_REG_PPC_CSIGR:
1995*4882a593Smuzhiyun 		vcpu->arch.csigr = set_reg_val(id, *val);
1996*4882a593Smuzhiyun 		break;
1997*4882a593Smuzhiyun 	case KVM_REG_PPC_TACR:
1998*4882a593Smuzhiyun 		vcpu->arch.tacr = set_reg_val(id, *val);
1999*4882a593Smuzhiyun 		break;
2000*4882a593Smuzhiyun 	case KVM_REG_PPC_TCSCR:
2001*4882a593Smuzhiyun 		vcpu->arch.tcscr = set_reg_val(id, *val);
2002*4882a593Smuzhiyun 		break;
2003*4882a593Smuzhiyun 	case KVM_REG_PPC_PID:
2004*4882a593Smuzhiyun 		vcpu->arch.pid = set_reg_val(id, *val);
2005*4882a593Smuzhiyun 		break;
2006*4882a593Smuzhiyun 	case KVM_REG_PPC_ACOP:
2007*4882a593Smuzhiyun 		vcpu->arch.acop = set_reg_val(id, *val);
2008*4882a593Smuzhiyun 		break;
2009*4882a593Smuzhiyun 	case KVM_REG_PPC_WORT:
2010*4882a593Smuzhiyun 		vcpu->arch.wort = set_reg_val(id, *val);
2011*4882a593Smuzhiyun 		break;
2012*4882a593Smuzhiyun 	case KVM_REG_PPC_TIDR:
2013*4882a593Smuzhiyun 		vcpu->arch.tid = set_reg_val(id, *val);
2014*4882a593Smuzhiyun 		break;
2015*4882a593Smuzhiyun 	case KVM_REG_PPC_PSSCR:
2016*4882a593Smuzhiyun 		vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS;
2017*4882a593Smuzhiyun 		break;
2018*4882a593Smuzhiyun 	case KVM_REG_PPC_VPA_ADDR:
2019*4882a593Smuzhiyun 		addr = set_reg_val(id, *val);
2020*4882a593Smuzhiyun 		r = -EINVAL;
2021*4882a593Smuzhiyun 		if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
2022*4882a593Smuzhiyun 			      vcpu->arch.dtl.next_gpa))
2023*4882a593Smuzhiyun 			break;
2024*4882a593Smuzhiyun 		r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
2025*4882a593Smuzhiyun 		break;
2026*4882a593Smuzhiyun 	case KVM_REG_PPC_VPA_SLB:
2027*4882a593Smuzhiyun 		addr = val->vpaval.addr;
2028*4882a593Smuzhiyun 		len = val->vpaval.length;
2029*4882a593Smuzhiyun 		r = -EINVAL;
2030*4882a593Smuzhiyun 		if (addr && !vcpu->arch.vpa.next_gpa)
2031*4882a593Smuzhiyun 			break;
2032*4882a593Smuzhiyun 		r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
2033*4882a593Smuzhiyun 		break;
2034*4882a593Smuzhiyun 	case KVM_REG_PPC_VPA_DTL:
2035*4882a593Smuzhiyun 		addr = val->vpaval.addr;
2036*4882a593Smuzhiyun 		len = val->vpaval.length;
2037*4882a593Smuzhiyun 		r = -EINVAL;
2038*4882a593Smuzhiyun 		if (addr && (len < sizeof(struct dtl_entry) ||
2039*4882a593Smuzhiyun 			     !vcpu->arch.vpa.next_gpa))
2040*4882a593Smuzhiyun 			break;
2041*4882a593Smuzhiyun 		len -= len % sizeof(struct dtl_entry);
2042*4882a593Smuzhiyun 		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
2043*4882a593Smuzhiyun 		break;
2044*4882a593Smuzhiyun 	case KVM_REG_PPC_TB_OFFSET:
2045*4882a593Smuzhiyun 		/* round up to multiple of 2^24 */
2046*4882a593Smuzhiyun 		vcpu->arch.vcore->tb_offset =
2047*4882a593Smuzhiyun 			ALIGN(set_reg_val(id, *val), 1UL << 24);
2048*4882a593Smuzhiyun 		break;
2049*4882a593Smuzhiyun 	case KVM_REG_PPC_LPCR:
2050*4882a593Smuzhiyun 		kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true);
2051*4882a593Smuzhiyun 		break;
2052*4882a593Smuzhiyun 	case KVM_REG_PPC_LPCR_64:
2053*4882a593Smuzhiyun 		kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false);
2054*4882a593Smuzhiyun 		break;
2055*4882a593Smuzhiyun 	case KVM_REG_PPC_PPR:
2056*4882a593Smuzhiyun 		vcpu->arch.ppr = set_reg_val(id, *val);
2057*4882a593Smuzhiyun 		break;
2058*4882a593Smuzhiyun #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2059*4882a593Smuzhiyun 	case KVM_REG_PPC_TFHAR:
2060*4882a593Smuzhiyun 		vcpu->arch.tfhar = set_reg_val(id, *val);
2061*4882a593Smuzhiyun 		break;
2062*4882a593Smuzhiyun 	case KVM_REG_PPC_TFIAR:
2063*4882a593Smuzhiyun 		vcpu->arch.tfiar = set_reg_val(id, *val);
2064*4882a593Smuzhiyun 		break;
2065*4882a593Smuzhiyun 	case KVM_REG_PPC_TEXASR:
2066*4882a593Smuzhiyun 		vcpu->arch.texasr = set_reg_val(id, *val);
2067*4882a593Smuzhiyun 		break;
2068*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
2069*4882a593Smuzhiyun 		i = id - KVM_REG_PPC_TM_GPR0;
2070*4882a593Smuzhiyun 		vcpu->arch.gpr_tm[i] = set_reg_val(id, *val);
2071*4882a593Smuzhiyun 		break;
2072*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
2073*4882a593Smuzhiyun 	{
2074*4882a593Smuzhiyun 		int j;
2075*4882a593Smuzhiyun 		i = id - KVM_REG_PPC_TM_VSR0;
2076*4882a593Smuzhiyun 		if (i < 32)
2077*4882a593Smuzhiyun 			for (j = 0; j < TS_FPRWIDTH; j++)
2078*4882a593Smuzhiyun 				vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
2079*4882a593Smuzhiyun 		else
2080*4882a593Smuzhiyun 			if (cpu_has_feature(CPU_FTR_ALTIVEC))
2081*4882a593Smuzhiyun 				vcpu->arch.vr_tm.vr[i-32] = val->vval;
2082*4882a593Smuzhiyun 			else
2083*4882a593Smuzhiyun 				r = -ENXIO;
2084*4882a593Smuzhiyun 		break;
2085*4882a593Smuzhiyun 	}
2086*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_CR:
2087*4882a593Smuzhiyun 		vcpu->arch.cr_tm = set_reg_val(id, *val);
2088*4882a593Smuzhiyun 		break;
2089*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_XER:
2090*4882a593Smuzhiyun 		vcpu->arch.xer_tm = set_reg_val(id, *val);
2091*4882a593Smuzhiyun 		break;
2092*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_LR:
2093*4882a593Smuzhiyun 		vcpu->arch.lr_tm = set_reg_val(id, *val);
2094*4882a593Smuzhiyun 		break;
2095*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_CTR:
2096*4882a593Smuzhiyun 		vcpu->arch.ctr_tm = set_reg_val(id, *val);
2097*4882a593Smuzhiyun 		break;
2098*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_FPSCR:
2099*4882a593Smuzhiyun 		vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
2100*4882a593Smuzhiyun 		break;
2101*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_AMR:
2102*4882a593Smuzhiyun 		vcpu->arch.amr_tm = set_reg_val(id, *val);
2103*4882a593Smuzhiyun 		break;
2104*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_PPR:
2105*4882a593Smuzhiyun 		vcpu->arch.ppr_tm = set_reg_val(id, *val);
2106*4882a593Smuzhiyun 		break;
2107*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_VRSAVE:
2108*4882a593Smuzhiyun 		vcpu->arch.vrsave_tm = set_reg_val(id, *val);
2109*4882a593Smuzhiyun 		break;
2110*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_VSCR:
2111*4882a593Smuzhiyun 		if (cpu_has_feature(CPU_FTR_ALTIVEC))
2112*4882a593Smuzhiyun 			vcpu->arch.vr_tm.vscr.u[3] = set_reg_val(id, *val);
2113*4882a593Smuzhiyun 		else
2114*4882a593Smuzhiyun 			r = -ENXIO;
2115*4882a593Smuzhiyun 		break;
2116*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_DSCR:
2117*4882a593Smuzhiyun 		vcpu->arch.dscr_tm = set_reg_val(id, *val);
2118*4882a593Smuzhiyun 		break;
2119*4882a593Smuzhiyun 	case KVM_REG_PPC_TM_TAR:
2120*4882a593Smuzhiyun 		vcpu->arch.tar_tm = set_reg_val(id, *val);
2121*4882a593Smuzhiyun 		break;
2122*4882a593Smuzhiyun #endif
2123*4882a593Smuzhiyun 	case KVM_REG_PPC_ARCH_COMPAT:
2124*4882a593Smuzhiyun 		r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
2125*4882a593Smuzhiyun 		break;
2126*4882a593Smuzhiyun 	case KVM_REG_PPC_DEC_EXPIRY:
2127*4882a593Smuzhiyun 		vcpu->arch.dec_expires = set_reg_val(id, *val) -
2128*4882a593Smuzhiyun 			vcpu->arch.vcore->tb_offset;
2129*4882a593Smuzhiyun 		break;
2130*4882a593Smuzhiyun 	case KVM_REG_PPC_ONLINE:
2131*4882a593Smuzhiyun 		i = set_reg_val(id, *val);
2132*4882a593Smuzhiyun 		if (i && !vcpu->arch.online)
2133*4882a593Smuzhiyun 			atomic_inc(&vcpu->arch.vcore->online_count);
2134*4882a593Smuzhiyun 		else if (!i && vcpu->arch.online)
2135*4882a593Smuzhiyun 			atomic_dec(&vcpu->arch.vcore->online_count);
2136*4882a593Smuzhiyun 		vcpu->arch.online = i;
2137*4882a593Smuzhiyun 		break;
2138*4882a593Smuzhiyun 	case KVM_REG_PPC_PTCR:
2139*4882a593Smuzhiyun 		vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val);
2140*4882a593Smuzhiyun 		break;
2141*4882a593Smuzhiyun 	default:
2142*4882a593Smuzhiyun 		r = -EINVAL;
2143*4882a593Smuzhiyun 		break;
2144*4882a593Smuzhiyun 	}
2145*4882a593Smuzhiyun 
2146*4882a593Smuzhiyun 	return r;
2147*4882a593Smuzhiyun }
2148*4882a593Smuzhiyun 
2149*4882a593Smuzhiyun /*
2150*4882a593Smuzhiyun  * On POWER9, threads are independent and can be in different partitions.
2151*4882a593Smuzhiyun  * Therefore we consider each thread to be a subcore.
2152*4882a593Smuzhiyun  * There is a restriction that all threads have to be in the same
2153*4882a593Smuzhiyun  * MMU mode (radix or HPT), unfortunately, but since we only support
2154*4882a593Smuzhiyun  * HPT guests on a HPT host so far, that isn't an impediment yet.
2155*4882a593Smuzhiyun  */
2156*4882a593Smuzhiyun static int threads_per_vcore(struct kvm *kvm)
2157*4882a593Smuzhiyun {
2158*4882a593Smuzhiyun 	if (kvm->arch.threads_indep)
2159*4882a593Smuzhiyun 		return 1;
2160*4882a593Smuzhiyun 	return threads_per_subcore;
2161*4882a593Smuzhiyun }
2162*4882a593Smuzhiyun 
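/* Allocate and initialise a virtual core whose first vcpu has ID @id. */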
2163*4882a593Smuzhiyun static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int id)
2164*4882a593Smuzhiyun {
2165*4882a593Smuzhiyun 	struct kvmppc_vcore *vcore;
2166*4882a593Smuzhiyun 
2167*4882a593Smuzhiyun 	vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
2168*4882a593Smuzhiyun 
2169*4882a593Smuzhiyun 	if (vcore == NULL)
2170*4882a593Smuzhiyun 		return NULL;
2171*4882a593Smuzhiyun 
2172*4882a593Smuzhiyun 	spin_lock_init(&vcore->lock);
2173*4882a593Smuzhiyun 	spin_lock_init(&vcore->stoltb_lock);
2174*4882a593Smuzhiyun 	rcuwait_init(&vcore->wait);
2175*4882a593Smuzhiyun 	vcore->preempt_tb = TB_NIL;
2176*4882a593Smuzhiyun 	vcore->lpcr = kvm->arch.lpcr;
2177*4882a593Smuzhiyun 	vcore->first_vcpuid = id;
2178*4882a593Smuzhiyun 	vcore->kvm = kvm;
2179*4882a593Smuzhiyun 	INIT_LIST_HEAD(&vcore->preempt_list);
2180*4882a593Smuzhiyun 
2181*4882a593Smuzhiyun 	return vcore;
2182*4882a593Smuzhiyun }
2183*4882a593Smuzhiyun 
2184*4882a593Smuzhiyun #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
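/* Per-vcpu time accumulators exposed via the debugfs "timings" file. */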
2185*4882a593Smuzhiyun static struct debugfs_timings_element {
2186*4882a593Smuzhiyun 	const char *name;
2187*4882a593Smuzhiyun 	size_t offset;
2188*4882a593Smuzhiyun } timings[] = {
2189*4882a593Smuzhiyun 	{"rm_entry",	offsetof(struct kvm_vcpu, arch.rm_entry)},
2190*4882a593Smuzhiyun 	{"rm_intr",	offsetof(struct kvm_vcpu, arch.rm_intr)},
2191*4882a593Smuzhiyun 	{"rm_exit",	offsetof(struct kvm_vcpu, arch.rm_exit)},
2192*4882a593Smuzhiyun 	{"guest",	offsetof(struct kvm_vcpu, arch.guest_time)},
2193*4882a593Smuzhiyun 	{"cede",	offsetof(struct kvm_vcpu, arch.cede_time)},
2194*4882a593Smuzhiyun };
2195*4882a593Smuzhiyun 
2196*4882a593Smuzhiyun #define N_TIMINGS	(ARRAY_SIZE(timings))
2197*4882a593Smuzhiyun 
2198*4882a593Smuzhiyun struct debugfs_timings_state {
2199*4882a593Smuzhiyun 	struct kvm_vcpu	*vcpu;
2200*4882a593Smuzhiyun 	unsigned int	buflen;
2201*4882a593Smuzhiyun 	char		buf[N_TIMINGS * 100];
2202*4882a593Smuzhiyun };
2203*4882a593Smuzhiyun 
2204*4882a593Smuzhiyun static int debugfs_timings_open(struct inode *inode, struct file *file)
2205*4882a593Smuzhiyun {
2206*4882a593Smuzhiyun 	struct kvm_vcpu *vcpu = inode->i_private;
2207*4882a593Smuzhiyun 	struct debugfs_timings_state *p;
2208*4882a593Smuzhiyun 
2209*4882a593Smuzhiyun 	p = kzalloc(sizeof(*p), GFP_KERNEL);
2210*4882a593Smuzhiyun 	if (!p)
2211*4882a593Smuzhiyun 		return -ENOMEM;
2212*4882a593Smuzhiyun 
2213*4882a593Smuzhiyun 	kvm_get_kvm(vcpu->kvm);
2214*4882a593Smuzhiyun 	p->vcpu = vcpu;
2215*4882a593Smuzhiyun 	file->private_data = p;
2216*4882a593Smuzhiyun 
2217*4882a593Smuzhiyun 	return nonseekable_open(inode, file);
2218*4882a593Smuzhiyun }
2219*4882a593Smuzhiyun 
2220*4882a593Smuzhiyun static int debugfs_timings_release(struct inode *inode, struct file *file)
2221*4882a593Smuzhiyun {
2222*4882a593Smuzhiyun 	struct debugfs_timings_state *p = file->private_data;
2223*4882a593Smuzhiyun 
2224*4882a593Smuzhiyun 	kvm_put_kvm(p->vcpu->kvm);
2225*4882a593Smuzhiyun 	kfree(p);
2226*4882a593Smuzhiyun 	return 0;
2227*4882a593Smuzhiyun }
2228*4882a593Smuzhiyun 
2229*4882a593Smuzhiyun static ssize_t debugfs_timings_read(struct file *file, char __user *buf,
2230*4882a593Smuzhiyun 				    size_t len, loff_t *ppos)
2231*4882a593Smuzhiyun {
2232*4882a593Smuzhiyun 	struct debugfs_timings_state *p = file->private_data;
2233*4882a593Smuzhiyun 	struct kvm_vcpu *vcpu = p->vcpu;
2234*4882a593Smuzhiyun 	char *s, *buf_end;
2235*4882a593Smuzhiyun 	struct kvmhv_tb_accumulator tb;
2236*4882a593Smuzhiyun 	u64 count;
2237*4882a593Smuzhiyun 	loff_t pos;
2238*4882a593Smuzhiyun 	ssize_t n;
2239*4882a593Smuzhiyun 	int i, loops;
2240*4882a593Smuzhiyun 	bool ok;
2241*4882a593Smuzhiyun 
2242*4882a593Smuzhiyun 	if (!p->buflen) {
2243*4882a593Smuzhiyun 		s = p->buf;
2244*4882a593Smuzhiyun 		buf_end = s + sizeof(p->buf);
2245*4882a593Smuzhiyun 		for (i = 0; i < N_TIMINGS; ++i) {
2246*4882a593Smuzhiyun 			struct kvmhv_tb_accumulator *acc;
2247*4882a593Smuzhiyun 
2248*4882a593Smuzhiyun 			acc = (struct kvmhv_tb_accumulator *)
2249*4882a593Smuzhiyun 				((unsigned long)vcpu + timings[i].offset);
2250*4882a593Smuzhiyun 			ok = false;
2251*4882a593Smuzhiyun 			for (loops = 0; loops < 1000; ++loops) {
2252*4882a593Smuzhiyun 				count = acc->seqcount;
2253*4882a593Smuzhiyun 				if (!(count & 1)) {
2254*4882a593Smuzhiyun 					smp_rmb();
2255*4882a593Smuzhiyun 					tb = *acc;
2256*4882a593Smuzhiyun 					smp_rmb();
2257*4882a593Smuzhiyun 					if (count == acc->seqcount) {
2258*4882a593Smuzhiyun 						ok = true;
2259*4882a593Smuzhiyun 						break;
2260*4882a593Smuzhiyun 					}
2261*4882a593Smuzhiyun 				}
2262*4882a593Smuzhiyun 				udelay(1);
2263*4882a593Smuzhiyun 			}
2264*4882a593Smuzhiyun 			if (!ok)
2265*4882a593Smuzhiyun 				snprintf(s, buf_end - s, "%s: stuck\n",
2266*4882a593Smuzhiyun 					timings[i].name);
2267*4882a593Smuzhiyun 			else
2268*4882a593Smuzhiyun 				snprintf(s, buf_end - s,
2269*4882a593Smuzhiyun 					"%s: %llu %llu %llu %llu\n",
2270*4882a593Smuzhiyun 					timings[i].name, count / 2,
2271*4882a593Smuzhiyun 					tb_to_ns(tb.tb_total),
2272*4882a593Smuzhiyun 					tb_to_ns(tb.tb_min),
2273*4882a593Smuzhiyun 					tb_to_ns(tb.tb_max));
2274*4882a593Smuzhiyun 			s += strlen(s);
2275*4882a593Smuzhiyun 		}
2276*4882a593Smuzhiyun 		p->buflen = s - p->buf;
2277*4882a593Smuzhiyun 	}
2278*4882a593Smuzhiyun 
2279*4882a593Smuzhiyun 	pos = *ppos;
2280*4882a593Smuzhiyun 	if (pos >= p->buflen)
2281*4882a593Smuzhiyun 		return 0;
2282*4882a593Smuzhiyun 	if (len > p->buflen - pos)
2283*4882a593Smuzhiyun 		len = p->buflen - pos;
2284*4882a593Smuzhiyun 	n = copy_to_user(buf, p->buf + pos, len);
2285*4882a593Smuzhiyun 	if (n) {
2286*4882a593Smuzhiyun 		if (n == len)
2287*4882a593Smuzhiyun 			return -EFAULT;
2288*4882a593Smuzhiyun 		len -= n;
2289*4882a593Smuzhiyun 	}
2290*4882a593Smuzhiyun 	*ppos = pos + len;
2291*4882a593Smuzhiyun 	return len;
2292*4882a593Smuzhiyun }
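/*
 * A distilled form of the lockless snapshot loop used in
 * debugfs_timings_read() above: the updater increments seqcount before
 * and after touching the accumulator, so a reader retries until it sees
 * an even count that is unchanged across the copy.  This helper is an
 * illustrative sketch only (the name is made up, and it assumes the
 * same single-updater discipline as the code above).
 */
static inline bool kvmhv_tb_acc_snapshot(struct kvmhv_tb_accumulator *acc,
					 struct kvmhv_tb_accumulator *snap)
{
	u64 count = acc->seqcount;

	if (count & 1)		/* odd count: updater is mid-update */
		return false;
	smp_rmb();		/* read seqcount before the payload */
	*snap = *acc;
	smp_rmb();		/* read payload before the re-check */
	return count == acc->seqcount;
}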
2293*4882a593Smuzhiyun 
2294*4882a593Smuzhiyun static ssize_t debugfs_timings_write(struct file *file, const char __user *buf,
2295*4882a593Smuzhiyun 				     size_t len, loff_t *ppos)
2296*4882a593Smuzhiyun {
2297*4882a593Smuzhiyun 	return -EACCES;
2298*4882a593Smuzhiyun }
2299*4882a593Smuzhiyun 
2300*4882a593Smuzhiyun static const struct file_operations debugfs_timings_ops = {
2301*4882a593Smuzhiyun 	.owner	 = THIS_MODULE,
2302*4882a593Smuzhiyun 	.open	 = debugfs_timings_open,
2303*4882a593Smuzhiyun 	.release = debugfs_timings_release,
2304*4882a593Smuzhiyun 	.read	 = debugfs_timings_read,
2305*4882a593Smuzhiyun 	.write	 = debugfs_timings_write,
2306*4882a593Smuzhiyun 	.llseek	 = generic_file_llseek,
2307*4882a593Smuzhiyun };
2308*4882a593Smuzhiyun 
2309*4882a593Smuzhiyun /* Create a debugfs directory for the vcpu */
2310*4882a593Smuzhiyun static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
2311*4882a593Smuzhiyun {
2312*4882a593Smuzhiyun 	char buf[16];
2313*4882a593Smuzhiyun 	struct kvm *kvm = vcpu->kvm;
2314*4882a593Smuzhiyun 
2315*4882a593Smuzhiyun 	snprintf(buf, sizeof(buf), "vcpu%u", id);
2316*4882a593Smuzhiyun 	vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir);
2317*4882a593Smuzhiyun 	debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir, vcpu,
2318*4882a593Smuzhiyun 			    &debugfs_timings_ops);
2319*4882a593Smuzhiyun }
2320*4882a593Smuzhiyun 
2321*4882a593Smuzhiyun #else /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
2322*4882a593Smuzhiyun static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
2323*4882a593Smuzhiyun {
2324*4882a593Smuzhiyun }
2325*4882a593Smuzhiyun #endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
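/*
 * Hypothetical userspace consumer of the per-vcpu "timings" file that
 * debugfs_vcpu_init() creates above.  This is a separate userspace
 * program, not part of this file; the debugfs location (typically
 * /sys/kernel/debug/kvm/<pid>-<vm fd>) is an assumption here, while the
 * "vcpu%u/timings" naming matches the code above.  Each line has the
 * form "name: count total_ns min_ns max_ns".
 */
#include <stdio.h>

static int dump_vcpu_timings(const char *vm_debugfs_dir, unsigned int id)
{
	char path[256], line[128];
	FILE *f;

	snprintf(path, sizeof(path), "%s/vcpu%u/timings", vm_debugfs_dir, id);
	f = fopen(path, "r");
	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}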
2326*4882a593Smuzhiyun 
2327*4882a593Smuzhiyun static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
2328*4882a593Smuzhiyun {
2329*4882a593Smuzhiyun 	int err;
2330*4882a593Smuzhiyun 	int core;
2331*4882a593Smuzhiyun 	struct kvmppc_vcore *vcore;
2332*4882a593Smuzhiyun 	struct kvm *kvm;
2333*4882a593Smuzhiyun 	unsigned int id;
2334*4882a593Smuzhiyun 
2335*4882a593Smuzhiyun 	kvm = vcpu->kvm;
2336*4882a593Smuzhiyun 	id = vcpu->vcpu_id;
2337*4882a593Smuzhiyun 
2338*4882a593Smuzhiyun 	vcpu->arch.shared = &vcpu->arch.shregs;
2339*4882a593Smuzhiyun #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2340*4882a593Smuzhiyun 	/*
2341*4882a593Smuzhiyun 	 * The shared struct is never shared on HV,
2342*4882a593Smuzhiyun 	 * so we can always use host endianness
2343*4882a593Smuzhiyun 	 */
2344*4882a593Smuzhiyun #ifdef __BIG_ENDIAN__
2345*4882a593Smuzhiyun 	vcpu->arch.shared_big_endian = true;
2346*4882a593Smuzhiyun #else
2347*4882a593Smuzhiyun 	vcpu->arch.shared_big_endian = false;
2348*4882a593Smuzhiyun #endif
2349*4882a593Smuzhiyun #endif
2350*4882a593Smuzhiyun 	vcpu->arch.mmcr[0] = MMCR0_FC;
2351*4882a593Smuzhiyun 	vcpu->arch.ctrl = CTRL_RUNLATCH;
2352*4882a593Smuzhiyun 	/* default to host PVR, since we can't spoof it */
2353*4882a593Smuzhiyun 	kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
2354*4882a593Smuzhiyun 	spin_lock_init(&vcpu->arch.vpa_update_lock);
2355*4882a593Smuzhiyun 	spin_lock_init(&vcpu->arch.tbacct_lock);
2356*4882a593Smuzhiyun 	vcpu->arch.busy_preempt = TB_NIL;
2357*4882a593Smuzhiyun 	vcpu->arch.intr_msr = MSR_SF | MSR_ME;
2358*4882a593Smuzhiyun 
2359*4882a593Smuzhiyun 	/*
2360*4882a593Smuzhiyun 	 * Set the default HFSCR for the guest from the host value.
2361*4882a593Smuzhiyun 	 * This value is only used on POWER9.
2362*4882a593Smuzhiyun 	 * On POWER9, we want to virtualize the doorbell facility, so we
2363*4882a593Smuzhiyun 	 * don't set the HFSCR_MSGP bit; those instructions then trap and
2364*4882a593Smuzhiyun 	 * we emulate them.
2365*4882a593Smuzhiyun 	 */
2366*4882a593Smuzhiyun 	vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
2367*4882a593Smuzhiyun 		HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP | HFSCR_PREFIX;
2368*4882a593Smuzhiyun 	if (cpu_has_feature(CPU_FTR_HVMODE)) {
2369*4882a593Smuzhiyun 		vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
2370*4882a593Smuzhiyun #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2371*4882a593Smuzhiyun 		if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
2372*4882a593Smuzhiyun 			vcpu->arch.hfscr |= HFSCR_TM;
2373*4882a593Smuzhiyun #endif
2374*4882a593Smuzhiyun 	}
2375*4882a593Smuzhiyun 	if (cpu_has_feature(CPU_FTR_TM_COMP))
2376*4882a593Smuzhiyun 		vcpu->arch.hfscr |= HFSCR_TM;
2377*4882a593Smuzhiyun 
2378*4882a593Smuzhiyun 	kvmppc_mmu_book3s_hv_init(vcpu);
2379*4882a593Smuzhiyun 
2380*4882a593Smuzhiyun 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
2381*4882a593Smuzhiyun 
2382*4882a593Smuzhiyun 	init_waitqueue_head(&vcpu->arch.cpu_run);
2383*4882a593Smuzhiyun 
2384*4882a593Smuzhiyun 	mutex_lock(&kvm->lock);
2385*4882a593Smuzhiyun 	vcore = NULL;
2386*4882a593Smuzhiyun 	err = -EINVAL;
2387*4882a593Smuzhiyun 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
2388*4882a593Smuzhiyun 		if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode)) {
2389*4882a593Smuzhiyun 			pr_devel("KVM: VCPU ID too high\n");
2390*4882a593Smuzhiyun 			core = KVM_MAX_VCORES;
2391*4882a593Smuzhiyun 		} else {
2392*4882a593Smuzhiyun 			BUG_ON(kvm->arch.smt_mode != 1);
2393*4882a593Smuzhiyun 			core = kvmppc_pack_vcpu_id(kvm, id);
2394*4882a593Smuzhiyun 		}
2395*4882a593Smuzhiyun 	} else {
2396*4882a593Smuzhiyun 		core = id / kvm->arch.smt_mode;
2397*4882a593Smuzhiyun 	}
2398*4882a593Smuzhiyun 	if (core < KVM_MAX_VCORES) {
2399*4882a593Smuzhiyun 		vcore = kvm->arch.vcores[core];
2400*4882a593Smuzhiyun 		if (vcore && cpu_has_feature(CPU_FTR_ARCH_300)) {
2401*4882a593Smuzhiyun 			pr_devel("KVM: collision on id %u", id);
2402*4882a593Smuzhiyun 			vcore = NULL;
2403*4882a593Smuzhiyun 		} else if (!vcore) {
2404*4882a593Smuzhiyun 			/*
2405*4882a593Smuzhiyun 			 * Take mmu_setup_lock for mutual exclusion
2406*4882a593Smuzhiyun 			 * with kvmppc_update_lpcr().
2407*4882a593Smuzhiyun 			 */
2408*4882a593Smuzhiyun 			err = -ENOMEM;
2409*4882a593Smuzhiyun 			vcore = kvmppc_vcore_create(kvm,
2410*4882a593Smuzhiyun 					id & ~(kvm->arch.smt_mode - 1));
2411*4882a593Smuzhiyun 			mutex_lock(&kvm->arch.mmu_setup_lock);
2412*4882a593Smuzhiyun 			kvm->arch.vcores[core] = vcore;
2413*4882a593Smuzhiyun 			kvm->arch.online_vcores++;
2414*4882a593Smuzhiyun 			mutex_unlock(&kvm->arch.mmu_setup_lock);
2415*4882a593Smuzhiyun 		}
2416*4882a593Smuzhiyun 	}
2417*4882a593Smuzhiyun 	mutex_unlock(&kvm->lock);
2418*4882a593Smuzhiyun 
2419*4882a593Smuzhiyun 	if (!vcore)
2420*4882a593Smuzhiyun 		return err;
2421*4882a593Smuzhiyun 
2422*4882a593Smuzhiyun 	spin_lock(&vcore->lock);
2423*4882a593Smuzhiyun 	++vcore->num_threads;
2424*4882a593Smuzhiyun 	spin_unlock(&vcore->lock);
2425*4882a593Smuzhiyun 	vcpu->arch.vcore = vcore;
2426*4882a593Smuzhiyun 	vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
2427*4882a593Smuzhiyun 	vcpu->arch.thread_cpu = -1;
2428*4882a593Smuzhiyun 	vcpu->arch.prev_cpu = -1;
2429*4882a593Smuzhiyun 
2430*4882a593Smuzhiyun 	vcpu->arch.cpu_type = KVM_CPU_3S_64;
2431*4882a593Smuzhiyun 	kvmppc_sanity_check(vcpu);
2432*4882a593Smuzhiyun 
2433*4882a593Smuzhiyun 	debugfs_vcpu_init(vcpu, id);
2434*4882a593Smuzhiyun 
2435*4882a593Smuzhiyun 	return 0;
2436*4882a593Smuzhiyun }
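/*
 * Worked example of the packing above (pre-POWER9 path, illustrative):
 * with kvm->arch.smt_mode == 4, vcpu ids 0..3 all yield core = id / 4
 * == 0 and share the vcore created with first_vcpuid = id & ~3 == 0;
 * each vcpu's ptid is then its offset within that vcore.
 */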
2437*4882a593Smuzhiyun 
2438*4882a593Smuzhiyun static int kvmhv_set_smt_mode(struct kvm *kvm, unsigned long smt_mode,
2439*4882a593Smuzhiyun 			      unsigned long flags)
2440*4882a593Smuzhiyun {
2441*4882a593Smuzhiyun 	int err;
2442*4882a593Smuzhiyun 	int esmt = 0;
2443*4882a593Smuzhiyun 
2444*4882a593Smuzhiyun 	if (flags)
2445*4882a593Smuzhiyun 		return -EINVAL;
2446*4882a593Smuzhiyun 	if (smt_mode > MAX_SMT_THREADS || !is_power_of_2(smt_mode))
2447*4882a593Smuzhiyun 		return -EINVAL;
2448*4882a593Smuzhiyun 	if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
2449*4882a593Smuzhiyun 		/*
2450*4882a593Smuzhiyun 		 * On POWER8 (or POWER7), the threading mode is "strict",
2451*4882a593Smuzhiyun 		 * so we pack smt_mode vcpus per vcore.
2452*4882a593Smuzhiyun 		 */
2453*4882a593Smuzhiyun 		if (smt_mode > threads_per_subcore)
2454*4882a593Smuzhiyun 			return -EINVAL;
2455*4882a593Smuzhiyun 	} else {
2456*4882a593Smuzhiyun 		/*
2457*4882a593Smuzhiyun 		 * On POWER9, the threading mode is "loose",
2458*4882a593Smuzhiyun 		 * so each vcpu gets its own vcore.
2459*4882a593Smuzhiyun 		 */
2460*4882a593Smuzhiyun 		esmt = smt_mode;
2461*4882a593Smuzhiyun 		smt_mode = 1;
2462*4882a593Smuzhiyun 	}
2463*4882a593Smuzhiyun 	mutex_lock(&kvm->lock);
2464*4882a593Smuzhiyun 	err = -EBUSY;
2465*4882a593Smuzhiyun 	if (!kvm->arch.online_vcores) {
2466*4882a593Smuzhiyun 		kvm->arch.smt_mode = smt_mode;
2467*4882a593Smuzhiyun 		kvm->arch.emul_smt_mode = esmt;
2468*4882a593Smuzhiyun 		err = 0;
2469*4882a593Smuzhiyun 	}
2470*4882a593Smuzhiyun 	mutex_unlock(&kvm->lock);
2471*4882a593Smuzhiyun 
2472*4882a593Smuzhiyun 	return err;
2473*4882a593Smuzhiyun }
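/*
 * kvmhv_set_smt_mode() is reached from userspace through the
 * KVM_ENABLE_CAP vm ioctl with KVM_CAP_PPC_SMT (args[0] = desired SMT
 * mode, args[1] = flags, which must be zero per the check above).  A
 * minimal caller might look like the sketch below; this is a separate
 * userspace program, and vmfd handling/error checking are assumptions.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int set_guest_smt_mode(int vmfd, unsigned long smt_mode)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_PPC_SMT,
		.args = { smt_mode, 0 },
	};

	return ioctl(vmfd, KVM_ENABLE_CAP, &cap);	/* 0 on success */
}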
2474*4882a593Smuzhiyun 
2475*4882a593Smuzhiyun static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
2476*4882a593Smuzhiyun {
2477*4882a593Smuzhiyun 	if (vpa->pinned_addr)
2478*4882a593Smuzhiyun 		kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa,
2479*4882a593Smuzhiyun 					vpa->dirty);
2480*4882a593Smuzhiyun }
2481*4882a593Smuzhiyun 
2482*4882a593Smuzhiyun static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
2483*4882a593Smuzhiyun {
2484*4882a593Smuzhiyun 	spin_lock(&vcpu->arch.vpa_update_lock);
2485*4882a593Smuzhiyun 	unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
2486*4882a593Smuzhiyun 	unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
2487*4882a593Smuzhiyun 	unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
2488*4882a593Smuzhiyun 	spin_unlock(&vcpu->arch.vpa_update_lock);
2489*4882a593Smuzhiyun }
2490*4882a593Smuzhiyun 
2491*4882a593Smuzhiyun static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
2492*4882a593Smuzhiyun {
2493*4882a593Smuzhiyun 	/* Indicate we want to get back into the guest */
2494*4882a593Smuzhiyun 	return 1;
2495*4882a593Smuzhiyun }
2496*4882a593Smuzhiyun 
2497*4882a593Smuzhiyun static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
2498*4882a593Smuzhiyun {
2499*4882a593Smuzhiyun 	unsigned long dec_nsec, now;
2500*4882a593Smuzhiyun 
2501*4882a593Smuzhiyun 	now = get_tb();
2502*4882a593Smuzhiyun 	if (now > vcpu->arch.dec_expires) {
2503*4882a593Smuzhiyun 		/* decrementer has already gone negative */
2504*4882a593Smuzhiyun 		kvmppc_core_queue_dec(vcpu);
2505*4882a593Smuzhiyun 		kvmppc_core_prepare_to_enter(vcpu);
2506*4882a593Smuzhiyun 		return;
2507*4882a593Smuzhiyun 	}
2508*4882a593Smuzhiyun 	dec_nsec = tb_to_ns(vcpu->arch.dec_expires - now);
2509*4882a593Smuzhiyun 	hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL);
2510*4882a593Smuzhiyun 	vcpu->arch.timer_running = 1;
2511*4882a593Smuzhiyun }
2512*4882a593Smuzhiyun 
2513*4882a593Smuzhiyun extern int __kvmppc_vcore_entry(void);
2514*4882a593Smuzhiyun 
2515*4882a593Smuzhiyun static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
2516*4882a593Smuzhiyun 				   struct kvm_vcpu *vcpu)
2517*4882a593Smuzhiyun {
2518*4882a593Smuzhiyun 	u64 now;
2519*4882a593Smuzhiyun 
2520*4882a593Smuzhiyun 	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
2521*4882a593Smuzhiyun 		return;
2522*4882a593Smuzhiyun 	spin_lock_irq(&vcpu->arch.tbacct_lock);
2523*4882a593Smuzhiyun 	now = mftb();
2524*4882a593Smuzhiyun 	vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
2525*4882a593Smuzhiyun 		vcpu->arch.stolen_logged;
2526*4882a593Smuzhiyun 	vcpu->arch.busy_preempt = now;
2527*4882a593Smuzhiyun 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
2528*4882a593Smuzhiyun 	spin_unlock_irq(&vcpu->arch.tbacct_lock);
2529*4882a593Smuzhiyun 	--vc->n_runnable;
2530*4882a593Smuzhiyun 	WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL);
2531*4882a593Smuzhiyun }
2532*4882a593Smuzhiyun 
2533*4882a593Smuzhiyun static int kvmppc_grab_hwthread(int cpu)
2534*4882a593Smuzhiyun {
2535*4882a593Smuzhiyun 	struct paca_struct *tpaca;
2536*4882a593Smuzhiyun 	long timeout = 10000;
2537*4882a593Smuzhiyun 
2538*4882a593Smuzhiyun 	tpaca = paca_ptrs[cpu];
2539*4882a593Smuzhiyun 
2540*4882a593Smuzhiyun 	/* Ensure the thread won't go into the kernel if it wakes */
2541*4882a593Smuzhiyun 	tpaca->kvm_hstate.kvm_vcpu = NULL;
2542*4882a593Smuzhiyun 	tpaca->kvm_hstate.kvm_vcore = NULL;
2543*4882a593Smuzhiyun 	tpaca->kvm_hstate.napping = 0;
2544*4882a593Smuzhiyun 	smp_wmb();
2545*4882a593Smuzhiyun 	tpaca->kvm_hstate.hwthread_req = 1;
2546*4882a593Smuzhiyun 
2547*4882a593Smuzhiyun 	/*
2548*4882a593Smuzhiyun 	 * If the thread is already executing in the kernel (e.g. handling
2549*4882a593Smuzhiyun 	 * a stray interrupt), wait for it to get back to nap mode.
2550*4882a593Smuzhiyun 	 * The smp_mb() is to ensure that our setting of hwthread_req
2551*4882a593Smuzhiyun 	 * is visible before we look at hwthread_state, so if this
2552*4882a593Smuzhiyun 	 * races with the code at system_reset_pSeries and the thread
2553*4882a593Smuzhiyun 	 * misses our setting of hwthread_req, we are sure to see its
2554*4882a593Smuzhiyun 	 * setting of hwthread_state, and vice versa.
2555*4882a593Smuzhiyun 	 */
2556*4882a593Smuzhiyun 	smp_mb();
2557*4882a593Smuzhiyun 	while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
2558*4882a593Smuzhiyun 		if (--timeout <= 0) {
2559*4882a593Smuzhiyun 			pr_err("KVM: couldn't grab cpu %d\n", cpu);
2560*4882a593Smuzhiyun 			return -EBUSY;
2561*4882a593Smuzhiyun 		}
2562*4882a593Smuzhiyun 		udelay(1);
2563*4882a593Smuzhiyun 	}
2564*4882a593Smuzhiyun 	return 0;
2565*4882a593Smuzhiyun }
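/*
 * Sketch of how the smp_mb() above pairs with the offline hw thread
 * (simplified; the other side lives in the nap/system-reset code):
 *
 *   grabbing thread                 offline hw thread
 *   ---------------                 -----------------
 *   hwthread_req = 1                hwthread_state = KVM_HWTHREAD_IN_KERNEL
 *   smp_mb()                        smp_mb()
 *   read hwthread_state             read hwthread_req
 *
 * At least one side must observe the other's store, so a thread that is
 * still in the kernel cannot be missed here.
 */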
2566*4882a593Smuzhiyun 
2567*4882a593Smuzhiyun static void kvmppc_release_hwthread(int cpu)
2568*4882a593Smuzhiyun {
2569*4882a593Smuzhiyun 	struct paca_struct *tpaca;
2570*4882a593Smuzhiyun 
2571*4882a593Smuzhiyun 	tpaca = paca_ptrs[cpu];
2572*4882a593Smuzhiyun 	tpaca->kvm_hstate.hwthread_req = 0;
2573*4882a593Smuzhiyun 	tpaca->kvm_hstate.kvm_vcpu = NULL;
2574*4882a593Smuzhiyun 	tpaca->kvm_hstate.kvm_vcore = NULL;
2575*4882a593Smuzhiyun 	tpaca->kvm_hstate.kvm_split_mode = NULL;
2576*4882a593Smuzhiyun }
2577*4882a593Smuzhiyun 
2578*4882a593Smuzhiyun static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
2579*4882a593Smuzhiyun {
2580*4882a593Smuzhiyun 	struct kvm_nested_guest *nested = vcpu->arch.nested;
2581*4882a593Smuzhiyun 	cpumask_t *cpu_in_guest;
2582*4882a593Smuzhiyun 	int i;
2583*4882a593Smuzhiyun 
2584*4882a593Smuzhiyun 	cpu = cpu_first_tlb_thread_sibling(cpu);
2585*4882a593Smuzhiyun 	if (nested) {
2586*4882a593Smuzhiyun 		cpumask_set_cpu(cpu, &nested->need_tlb_flush);
2587*4882a593Smuzhiyun 		cpu_in_guest = &nested->cpu_in_guest;
2588*4882a593Smuzhiyun 	} else {
2589*4882a593Smuzhiyun 		cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush);
2590*4882a593Smuzhiyun 		cpu_in_guest = &kvm->arch.cpu_in_guest;
2591*4882a593Smuzhiyun 	}
2592*4882a593Smuzhiyun 	/*
2593*4882a593Smuzhiyun 	 * Make sure setting of bit in need_tlb_flush precedes
2594*4882a593Smuzhiyun 	 * testing of cpu_in_guest bits.  The matching barrier on
2595*4882a593Smuzhiyun 	 * the other side is the first smp_mb() in kvmppc_run_core().
2596*4882a593Smuzhiyun 	 */
2597*4882a593Smuzhiyun 	smp_mb();
2598*4882a593Smuzhiyun 	for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu);
2599*4882a593Smuzhiyun 					i += cpu_tlb_thread_sibling_step())
2600*4882a593Smuzhiyun 		if (cpumask_test_cpu(i, cpu_in_guest))
2601*4882a593Smuzhiyun 			smp_call_function_single(i, do_nothing, NULL, 1);
2602*4882a593Smuzhiyun }
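/*
 * Note (illustrative): do_nothing is an empty IPI handler; the point of
 * the smp_call_function_single() above is the interrupt itself, which
 * kicks the sibling thread out of the guest so that it sees the
 * need_tlb_flush bit on its next guest entry.
 */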
2603*4882a593Smuzhiyun 
2604*4882a593Smuzhiyun static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
2605*4882a593Smuzhiyun {
2606*4882a593Smuzhiyun 	struct kvm_nested_guest *nested = vcpu->arch.nested;
2607*4882a593Smuzhiyun 	struct kvm *kvm = vcpu->kvm;
2608*4882a593Smuzhiyun 	int prev_cpu;
2609*4882a593Smuzhiyun 
2610*4882a593Smuzhiyun 	if (!cpu_has_feature(CPU_FTR_HVMODE))
2611*4882a593Smuzhiyun 		return;
2612*4882a593Smuzhiyun 
2613*4882a593Smuzhiyun 	if (nested)
2614*4882a593Smuzhiyun 		prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id];
2615*4882a593Smuzhiyun 	else
2616*4882a593Smuzhiyun 		prev_cpu = vcpu->arch.prev_cpu;
2617*4882a593Smuzhiyun 
2618*4882a593Smuzhiyun 	/*
2619*4882a593Smuzhiyun 	 * With radix, the guest can do TLB invalidations itself,
2620*4882a593Smuzhiyun 	 * and it could choose to use the local form (tlbiel) if
2621*4882a593Smuzhiyun 	 * it is invalidating a translation that has only ever been
2622*4882a593Smuzhiyun 	 * used on one vcpu.  However, that doesn't mean it has
2623*4882a593Smuzhiyun 	 * only ever been used on one physical cpu, since vcpus
2624*4882a593Smuzhiyun 	 * can move around between pcpus.  To cope with this, when
2625*4882a593Smuzhiyun 	 * a vcpu moves from one pcpu to another, we need to tell
2626*4882a593Smuzhiyun 	 * any vcpus running on the core where this vcpu previously
2627*4882a593Smuzhiyun 	 * ran to flush the TLB.  The TLB is shared between threads,
2628*4882a593Smuzhiyun 	 * so we use a single bit in .need_tlb_flush for all 4 threads.
2629*4882a593Smuzhiyun 	 */
2630*4882a593Smuzhiyun 	if (prev_cpu != pcpu) {
2631*4882a593Smuzhiyun 		if (prev_cpu >= 0 &&
2632*4882a593Smuzhiyun 		    cpu_first_tlb_thread_sibling(prev_cpu) !=
2633*4882a593Smuzhiyun 		    cpu_first_tlb_thread_sibling(pcpu))
2634*4882a593Smuzhiyun 			radix_flush_cpu(kvm, prev_cpu, vcpu);
2635*4882a593Smuzhiyun 		if (nested)
2636*4882a593Smuzhiyun 			nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
2637*4882a593Smuzhiyun 		else
2638*4882a593Smuzhiyun 			vcpu->arch.prev_cpu = pcpu;
2639*4882a593Smuzhiyun 	}
2640*4882a593Smuzhiyun }
2641*4882a593Smuzhiyun 
2642*4882a593Smuzhiyun static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
2643*4882a593Smuzhiyun {
2644*4882a593Smuzhiyun 	int cpu;
2645*4882a593Smuzhiyun 	struct paca_struct *tpaca;
2646*4882a593Smuzhiyun 	struct kvm *kvm = vc->kvm;
2647*4882a593Smuzhiyun 
2648*4882a593Smuzhiyun 	cpu = vc->pcpu;
2649*4882a593Smuzhiyun 	if (vcpu) {
2650*4882a593Smuzhiyun 		if (vcpu->arch.timer_running) {
2651*4882a593Smuzhiyun 			hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
2652*4882a593Smuzhiyun 			vcpu->arch.timer_running = 0;
2653*4882a593Smuzhiyun 		}
2654*4882a593Smuzhiyun 		cpu += vcpu->arch.ptid;
2655*4882a593Smuzhiyun 		vcpu->cpu = vc->pcpu;
2656*4882a593Smuzhiyun 		vcpu->arch.thread_cpu = cpu;
2657*4882a593Smuzhiyun 		cpumask_set_cpu(cpu, &kvm->arch.cpu_in_guest);
2658*4882a593Smuzhiyun 	}
2659*4882a593Smuzhiyun 	tpaca = paca_ptrs[cpu];
2660*4882a593Smuzhiyun 	tpaca->kvm_hstate.kvm_vcpu = vcpu;
2661*4882a593Smuzhiyun 	tpaca->kvm_hstate.ptid = cpu - vc->pcpu;
2662*4882a593Smuzhiyun 	tpaca->kvm_hstate.fake_suspend = 0;
2663*4882a593Smuzhiyun 	/* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */
2664*4882a593Smuzhiyun 	smp_wmb();
2665*4882a593Smuzhiyun 	tpaca->kvm_hstate.kvm_vcore = vc;
2666*4882a593Smuzhiyun 	if (cpu != smp_processor_id())
2667*4882a593Smuzhiyun 		kvmppc_ipi_thread(cpu);
2668*4882a593Smuzhiyun }
2669*4882a593Smuzhiyun 
2670*4882a593Smuzhiyun static void kvmppc_wait_for_nap(int n_threads)
2671*4882a593Smuzhiyun {
2672*4882a593Smuzhiyun 	int cpu = smp_processor_id();
2673*4882a593Smuzhiyun 	int i, loops;
2674*4882a593Smuzhiyun 
2675*4882a593Smuzhiyun 	if (n_threads <= 1)
2676*4882a593Smuzhiyun 		return;
2677*4882a593Smuzhiyun 	for (loops = 0; loops < 1000000; ++loops) {
2678*4882a593Smuzhiyun 		/*
2679*4882a593Smuzhiyun 		 * Check if all threads are finished.
2680*4882a593Smuzhiyun 		 * We set the vcore pointer when starting a thread
2681*4882a593Smuzhiyun 		 * and the thread clears it when finished, so we look
2682*4882a593Smuzhiyun 		 * for any threads that still have a non-NULL vcore ptr.
2683*4882a593Smuzhiyun 		 */
2684*4882a593Smuzhiyun 		for (i = 1; i < n_threads; ++i)
2685*4882a593Smuzhiyun 			if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore)
2686*4882a593Smuzhiyun 				break;
2687*4882a593Smuzhiyun 		if (i == n_threads) {
2688*4882a593Smuzhiyun 			HMT_medium();
2689*4882a593Smuzhiyun 			return;
2690*4882a593Smuzhiyun 		}
2691*4882a593Smuzhiyun 		HMT_low();
2692*4882a593Smuzhiyun 	}
2693*4882a593Smuzhiyun 	HMT_medium();
2694*4882a593Smuzhiyun 	for (i = 1; i < n_threads; ++i)
2695*4882a593Smuzhiyun 		if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore)
2696*4882a593Smuzhiyun 			pr_err("KVM: CPU %d seems to be stuck\n", cpu + i);
2697*4882a593Smuzhiyun }
2698*4882a593Smuzhiyun 
2699*4882a593Smuzhiyun /*
2700*4882a593Smuzhiyun  * Check that we are on thread 0 and that any other threads in
2701*4882a593Smuzhiyun  * this core are off-line.  Then grab the threads so they can't
2702*4882a593Smuzhiyun  * enter the kernel.
2703*4882a593Smuzhiyun  */
2704*4882a593Smuzhiyun static int on_primary_thread(void)
2705*4882a593Smuzhiyun {
2706*4882a593Smuzhiyun 	int cpu = smp_processor_id();
2707*4882a593Smuzhiyun 	int thr;
2708*4882a593Smuzhiyun 
2709*4882a593Smuzhiyun 	/* Are we on a primary subcore? */
2710*4882a593Smuzhiyun 	if (cpu_thread_in_subcore(cpu))
2711*4882a593Smuzhiyun 		return 0;
2712*4882a593Smuzhiyun 
2713*4882a593Smuzhiyun 	thr = 0;
2714*4882a593Smuzhiyun 	while (++thr < threads_per_subcore)
2715*4882a593Smuzhiyun 		if (cpu_online(cpu + thr))
2716*4882a593Smuzhiyun 			return 0;
2717*4882a593Smuzhiyun 
2718*4882a593Smuzhiyun 	/* Grab all hw threads so they can't go into the kernel */
2719*4882a593Smuzhiyun 	for (thr = 1; thr < threads_per_subcore; ++thr) {
2720*4882a593Smuzhiyun 		if (kvmppc_grab_hwthread(cpu + thr)) {
2721*4882a593Smuzhiyun 			/* Couldn't grab one; let the others go */
2722*4882a593Smuzhiyun 			do {
2723*4882a593Smuzhiyun 				kvmppc_release_hwthread(cpu + thr);
2724*4882a593Smuzhiyun 			} while (--thr > 0);
2725*4882a593Smuzhiyun 			return 0;
2726*4882a593Smuzhiyun 		}
2727*4882a593Smuzhiyun 	}
2728*4882a593Smuzhiyun 	return 1;
2729*4882a593Smuzhiyun }
2730*4882a593Smuzhiyun 
2731*4882a593Smuzhiyun /*
2732*4882a593Smuzhiyun  * A list of virtual cores for each physical CPU.
2733*4882a593Smuzhiyun  * These are vcores that could run but their runner VCPU tasks are
2734*4882a593Smuzhiyun  * (or may be) preempted.
2735*4882a593Smuzhiyun  */
2736*4882a593Smuzhiyun struct preempted_vcore_list {
2737*4882a593Smuzhiyun 	struct list_head	list;
2738*4882a593Smuzhiyun 	spinlock_t		lock;
2739*4882a593Smuzhiyun };
2740*4882a593Smuzhiyun 
2741*4882a593Smuzhiyun static DEFINE_PER_CPU(struct preempted_vcore_list, preempted_vcores);
2742*4882a593Smuzhiyun 
2743*4882a593Smuzhiyun static void init_vcore_lists(void)
2744*4882a593Smuzhiyun {
2745*4882a593Smuzhiyun 	int cpu;
2746*4882a593Smuzhiyun 
2747*4882a593Smuzhiyun 	for_each_possible_cpu(cpu) {
2748*4882a593Smuzhiyun 		struct preempted_vcore_list *lp = &per_cpu(preempted_vcores, cpu);
2749*4882a593Smuzhiyun 		spin_lock_init(&lp->lock);
2750*4882a593Smuzhiyun 		INIT_LIST_HEAD(&lp->list);
2751*4882a593Smuzhiyun 	}
2752*4882a593Smuzhiyun }
2753*4882a593Smuzhiyun 
2754*4882a593Smuzhiyun static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc)
2755*4882a593Smuzhiyun {
2756*4882a593Smuzhiyun 	struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
2757*4882a593Smuzhiyun 
2758*4882a593Smuzhiyun 	vc->vcore_state = VCORE_PREEMPT;
2759*4882a593Smuzhiyun 	vc->pcpu = smp_processor_id();
2760*4882a593Smuzhiyun 	if (vc->num_threads < threads_per_vcore(vc->kvm)) {
2761*4882a593Smuzhiyun 		spin_lock(&lp->lock);
2762*4882a593Smuzhiyun 		list_add_tail(&vc->preempt_list, &lp->list);
2763*4882a593Smuzhiyun 		spin_unlock(&lp->lock);
2764*4882a593Smuzhiyun 	}
2765*4882a593Smuzhiyun 
2766*4882a593Smuzhiyun 	/* Start accumulating stolen time */
2767*4882a593Smuzhiyun 	kvmppc_core_start_stolen(vc);
2768*4882a593Smuzhiyun }
2769*4882a593Smuzhiyun 
2770*4882a593Smuzhiyun static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc)
2771*4882a593Smuzhiyun {
2772*4882a593Smuzhiyun 	struct preempted_vcore_list *lp;
2773*4882a593Smuzhiyun 
2774*4882a593Smuzhiyun 	kvmppc_core_end_stolen(vc);
2775*4882a593Smuzhiyun 	if (!list_empty(&vc->preempt_list)) {
2776*4882a593Smuzhiyun 		lp = &per_cpu(preempted_vcores, vc->pcpu);
2777*4882a593Smuzhiyun 		spin_lock(&lp->lock);
2778*4882a593Smuzhiyun 		list_del_init(&vc->preempt_list);
2779*4882a593Smuzhiyun 		spin_unlock(&lp->lock);
2780*4882a593Smuzhiyun 	}
2781*4882a593Smuzhiyun 	vc->vcore_state = VCORE_INACTIVE;
2782*4882a593Smuzhiyun }
2783*4882a593Smuzhiyun 
2784*4882a593Smuzhiyun /*
2785*4882a593Smuzhiyun  * This stores information about the virtual cores currently
2786*4882a593Smuzhiyun  * assigned to a physical core.
2787*4882a593Smuzhiyun  */
2788*4882a593Smuzhiyun struct core_info {
2789*4882a593Smuzhiyun 	int		n_subcores;
2790*4882a593Smuzhiyun 	int		max_subcore_threads;
2791*4882a593Smuzhiyun 	int		total_threads;
2792*4882a593Smuzhiyun 	int		subcore_threads[MAX_SUBCORES];
2793*4882a593Smuzhiyun 	struct kvmppc_vcore *vc[MAX_SUBCORES];
2794*4882a593Smuzhiyun };
2795*4882a593Smuzhiyun 
2796*4882a593Smuzhiyun /*
2797*4882a593Smuzhiyun  * This mapping means subcores 0 and 1 can use threads 0-3 and 4-7
2798*4882a593Smuzhiyun  * respectively in 2-way micro-threading (split-core) mode on POWER8.
2799*4882a593Smuzhiyun  */
2800*4882a593Smuzhiyun static int subcore_thread_map[MAX_SUBCORES] = { 0, 4, 2, 6 };
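/*
 * In 4-way split mode the same table is used, giving subcore thread
 * starts of 0, 4, 2 and 6 with two threads each (subcore_size becomes
 * MAX_SMT_THREADS / 4 in kvmppc_run_core() below).
 */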
2801*4882a593Smuzhiyun 
2802*4882a593Smuzhiyun static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc)
2803*4882a593Smuzhiyun {
2804*4882a593Smuzhiyun 	memset(cip, 0, sizeof(*cip));
2805*4882a593Smuzhiyun 	cip->n_subcores = 1;
2806*4882a593Smuzhiyun 	cip->max_subcore_threads = vc->num_threads;
2807*4882a593Smuzhiyun 	cip->total_threads = vc->num_threads;
2808*4882a593Smuzhiyun 	cip->subcore_threads[0] = vc->num_threads;
2809*4882a593Smuzhiyun 	cip->vc[0] = vc;
2810*4882a593Smuzhiyun }
2811*4882a593Smuzhiyun 
2812*4882a593Smuzhiyun static bool subcore_config_ok(int n_subcores, int n_threads)
2813*4882a593Smuzhiyun {
2814*4882a593Smuzhiyun 	/*
2815*4882a593Smuzhiyun 	 * POWER9 "SMT4" cores are permanently in what is effectively a 4-way
2816*4882a593Smuzhiyun 	 * split-core mode, with one thread per subcore.
2817*4882a593Smuzhiyun 	 */
2818*4882a593Smuzhiyun 	if (cpu_has_feature(CPU_FTR_ARCH_300))
2819*4882a593Smuzhiyun 		return n_subcores <= 4 && n_threads == 1;
2820*4882a593Smuzhiyun 
2821*4882a593Smuzhiyun 	/* On POWER8, can only dynamically split if unsplit to begin with */
2822*4882a593Smuzhiyun 	if (n_subcores > 1 && threads_per_subcore < MAX_SMT_THREADS)
2823*4882a593Smuzhiyun 		return false;
2824*4882a593Smuzhiyun 	if (n_subcores > MAX_SUBCORES)
2825*4882a593Smuzhiyun 		return false;
2826*4882a593Smuzhiyun 	if (n_subcores > 1) {
2827*4882a593Smuzhiyun 		if (!(dynamic_mt_modes & 2))
2828*4882a593Smuzhiyun 			n_subcores = 4;
2829*4882a593Smuzhiyun 		if (n_subcores > 2 && !(dynamic_mt_modes & 4))
2830*4882a593Smuzhiyun 			return false;
2831*4882a593Smuzhiyun 	}
2832*4882a593Smuzhiyun 
2833*4882a593Smuzhiyun 	return n_subcores * roundup_pow_of_two(n_threads) <= MAX_SMT_THREADS;
2834*4882a593Smuzhiyun }
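/*
 * Worked examples, assuming MAX_SMT_THREADS == 8, an unsplit core and
 * all dynamic_mt_modes bits set: on POWER8, (2 subcores, 4 threads)
 * passes since 2 * roundup_pow_of_two(4) == 8, while (2 subcores,
 * 8 threads) fails (16 > 8).  On POWER9 only configurations of up to
 * four 1-thread subcores are accepted.
 */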
2835*4882a593Smuzhiyun 
2836*4882a593Smuzhiyun static void init_vcore_to_run(struct kvmppc_vcore *vc)
2837*4882a593Smuzhiyun {
2838*4882a593Smuzhiyun 	vc->entry_exit_map = 0;
2839*4882a593Smuzhiyun 	vc->in_guest = 0;
2840*4882a593Smuzhiyun 	vc->napping_threads = 0;
2841*4882a593Smuzhiyun 	vc->conferring_threads = 0;
2842*4882a593Smuzhiyun 	vc->tb_offset_applied = 0;
2843*4882a593Smuzhiyun }
2844*4882a593Smuzhiyun 
2845*4882a593Smuzhiyun static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
2846*4882a593Smuzhiyun {
2847*4882a593Smuzhiyun 	int n_threads = vc->num_threads;
2848*4882a593Smuzhiyun 	int sub;
2849*4882a593Smuzhiyun 
2850*4882a593Smuzhiyun 	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
2851*4882a593Smuzhiyun 		return false;
2852*4882a593Smuzhiyun 
2853*4882a593Smuzhiyun 	/* In one_vm_per_core mode, require all vcores to be from the same vm */
2854*4882a593Smuzhiyun 	if (one_vm_per_core && vc->kvm != cip->vc[0]->kvm)
2855*4882a593Smuzhiyun 		return false;
2856*4882a593Smuzhiyun 
2857*4882a593Smuzhiyun 	/* Some POWER9 chips require all threads to be in the same MMU mode */
2858*4882a593Smuzhiyun 	if (no_mixing_hpt_and_radix &&
2859*4882a593Smuzhiyun 	    kvm_is_radix(vc->kvm) != kvm_is_radix(cip->vc[0]->kvm))
2860*4882a593Smuzhiyun 		return false;
2861*4882a593Smuzhiyun 
2862*4882a593Smuzhiyun 	if (n_threads < cip->max_subcore_threads)
2863*4882a593Smuzhiyun 		n_threads = cip->max_subcore_threads;
2864*4882a593Smuzhiyun 	if (!subcore_config_ok(cip->n_subcores + 1, n_threads))
2865*4882a593Smuzhiyun 		return false;
2866*4882a593Smuzhiyun 	cip->max_subcore_threads = n_threads;
2867*4882a593Smuzhiyun 
2868*4882a593Smuzhiyun 	sub = cip->n_subcores;
2869*4882a593Smuzhiyun 	++cip->n_subcores;
2870*4882a593Smuzhiyun 	cip->total_threads += vc->num_threads;
2871*4882a593Smuzhiyun 	cip->subcore_threads[sub] = vc->num_threads;
2872*4882a593Smuzhiyun 	cip->vc[sub] = vc;
2873*4882a593Smuzhiyun 	init_vcore_to_run(vc);
2874*4882a593Smuzhiyun 	list_del_init(&vc->preempt_list);
2875*4882a593Smuzhiyun 
2876*4882a593Smuzhiyun 	return true;
2877*4882a593Smuzhiyun }
2878*4882a593Smuzhiyun 
2879*4882a593Smuzhiyun /*
2880*4882a593Smuzhiyun  * Work out whether it is possible to piggyback the execution of
2881*4882a593Smuzhiyun  * vcore *pvc onto the execution of the other vcores described in *cip.
2882*4882a593Smuzhiyun  */
2883*4882a593Smuzhiyun static bool can_piggyback(struct kvmppc_vcore *pvc, struct core_info *cip,
2884*4882a593Smuzhiyun 			  int target_threads)
2885*4882a593Smuzhiyun {
2886*4882a593Smuzhiyun 	if (cip->total_threads + pvc->num_threads > target_threads)
2887*4882a593Smuzhiyun 		return false;
2888*4882a593Smuzhiyun 
2889*4882a593Smuzhiyun 	return can_dynamic_split(pvc, cip);
2890*4882a593Smuzhiyun }
2891*4882a593Smuzhiyun 
2892*4882a593Smuzhiyun static void prepare_threads(struct kvmppc_vcore *vc)
2893*4882a593Smuzhiyun {
2894*4882a593Smuzhiyun 	int i;
2895*4882a593Smuzhiyun 	struct kvm_vcpu *vcpu;
2896*4882a593Smuzhiyun 
2897*4882a593Smuzhiyun 	for_each_runnable_thread(i, vcpu, vc) {
2898*4882a593Smuzhiyun 		if (signal_pending(vcpu->arch.run_task))
2899*4882a593Smuzhiyun 			vcpu->arch.ret = -EINTR;
2900*4882a593Smuzhiyun 		else if (vcpu->arch.vpa.update_pending ||
2901*4882a593Smuzhiyun 			 vcpu->arch.slb_shadow.update_pending ||
2902*4882a593Smuzhiyun 			 vcpu->arch.dtl.update_pending)
2903*4882a593Smuzhiyun 			vcpu->arch.ret = RESUME_GUEST;
2904*4882a593Smuzhiyun 		else
2905*4882a593Smuzhiyun 			continue;
2906*4882a593Smuzhiyun 		kvmppc_remove_runnable(vc, vcpu);
2907*4882a593Smuzhiyun 		wake_up(&vcpu->arch.cpu_run);
2908*4882a593Smuzhiyun 	}
2909*4882a593Smuzhiyun }
2910*4882a593Smuzhiyun 
2911*4882a593Smuzhiyun static void collect_piggybacks(struct core_info *cip, int target_threads)
2912*4882a593Smuzhiyun {
2913*4882a593Smuzhiyun 	struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
2914*4882a593Smuzhiyun 	struct kvmppc_vcore *pvc, *vcnext;
2915*4882a593Smuzhiyun 
2916*4882a593Smuzhiyun 	spin_lock(&lp->lock);
2917*4882a593Smuzhiyun 	list_for_each_entry_safe(pvc, vcnext, &lp->list, preempt_list) {
2918*4882a593Smuzhiyun 		if (!spin_trylock(&pvc->lock))
2919*4882a593Smuzhiyun 			continue;
2920*4882a593Smuzhiyun 		prepare_threads(pvc);
2921*4882a593Smuzhiyun 		if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) {
2922*4882a593Smuzhiyun 			list_del_init(&pvc->preempt_list);
2923*4882a593Smuzhiyun 			if (pvc->runner == NULL) {
2924*4882a593Smuzhiyun 				pvc->vcore_state = VCORE_INACTIVE;
2925*4882a593Smuzhiyun 				kvmppc_core_end_stolen(pvc);
2926*4882a593Smuzhiyun 			}
2927*4882a593Smuzhiyun 			spin_unlock(&pvc->lock);
2928*4882a593Smuzhiyun 			continue;
2929*4882a593Smuzhiyun 		}
2930*4882a593Smuzhiyun 		if (!can_piggyback(pvc, cip, target_threads)) {
2931*4882a593Smuzhiyun 			spin_unlock(&pvc->lock);
2932*4882a593Smuzhiyun 			continue;
2933*4882a593Smuzhiyun 		}
2934*4882a593Smuzhiyun 		kvmppc_core_end_stolen(pvc);
2935*4882a593Smuzhiyun 		pvc->vcore_state = VCORE_PIGGYBACK;
2936*4882a593Smuzhiyun 		if (cip->total_threads >= target_threads)
2937*4882a593Smuzhiyun 			break;
2938*4882a593Smuzhiyun 	}
2939*4882a593Smuzhiyun 	spin_unlock(&lp->lock);
2940*4882a593Smuzhiyun }
2941*4882a593Smuzhiyun 
2942*4882a593Smuzhiyun static bool recheck_signals_and_mmu(struct core_info *cip)
2943*4882a593Smuzhiyun {
2944*4882a593Smuzhiyun 	int sub, i;
2945*4882a593Smuzhiyun 	struct kvm_vcpu *vcpu;
2946*4882a593Smuzhiyun 	struct kvmppc_vcore *vc;
2947*4882a593Smuzhiyun 
2948*4882a593Smuzhiyun 	for (sub = 0; sub < cip->n_subcores; ++sub) {
2949*4882a593Smuzhiyun 		vc = cip->vc[sub];
2950*4882a593Smuzhiyun 		if (!vc->kvm->arch.mmu_ready)
2951*4882a593Smuzhiyun 			return true;
2952*4882a593Smuzhiyun 		for_each_runnable_thread(i, vcpu, vc)
2953*4882a593Smuzhiyun 			if (signal_pending(vcpu->arch.run_task))
2954*4882a593Smuzhiyun 				return true;
2955*4882a593Smuzhiyun 	}
2956*4882a593Smuzhiyun 	return false;
2957*4882a593Smuzhiyun }
2958*4882a593Smuzhiyun 
2959*4882a593Smuzhiyun static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
2960*4882a593Smuzhiyun {
2961*4882a593Smuzhiyun 	int still_running = 0, i;
2962*4882a593Smuzhiyun 	u64 now;
2963*4882a593Smuzhiyun 	long ret;
2964*4882a593Smuzhiyun 	struct kvm_vcpu *vcpu;
2965*4882a593Smuzhiyun 
2966*4882a593Smuzhiyun 	spin_lock(&vc->lock);
2967*4882a593Smuzhiyun 	now = get_tb();
2968*4882a593Smuzhiyun 	for_each_runnable_thread(i, vcpu, vc) {
2969*4882a593Smuzhiyun 		/*
2970*4882a593Smuzhiyun 		 * It's safe to unlock the vcore in the loop here, because
2971*4882a593Smuzhiyun 		 * for_each_runnable_thread() is safe against removal of
2972*4882a593Smuzhiyun 		 * the vcpu, and the vcore state is VCORE_EXITING here,
2973*4882a593Smuzhiyun 		 * so any vcpus becoming runnable will have their arch.trap
2974*4882a593Smuzhiyun 		 * set to zero and can't actually run in the guest.
2975*4882a593Smuzhiyun 		 */
2976*4882a593Smuzhiyun 		spin_unlock(&vc->lock);
2977*4882a593Smuzhiyun 		/* cancel pending dec exception if dec is positive */
2978*4882a593Smuzhiyun 		if (now < vcpu->arch.dec_expires &&
2979*4882a593Smuzhiyun 		    kvmppc_core_pending_dec(vcpu))
2980*4882a593Smuzhiyun 			kvmppc_core_dequeue_dec(vcpu);
2981*4882a593Smuzhiyun 
2982*4882a593Smuzhiyun 		trace_kvm_guest_exit(vcpu);
2983*4882a593Smuzhiyun 
2984*4882a593Smuzhiyun 		ret = RESUME_GUEST;
2985*4882a593Smuzhiyun 		if (vcpu->arch.trap)
2986*4882a593Smuzhiyun 			ret = kvmppc_handle_exit_hv(vcpu,
2987*4882a593Smuzhiyun 						    vcpu->arch.run_task);
2988*4882a593Smuzhiyun 
2989*4882a593Smuzhiyun 		vcpu->arch.ret = ret;
2990*4882a593Smuzhiyun 		vcpu->arch.trap = 0;
2991*4882a593Smuzhiyun 
2992*4882a593Smuzhiyun 		spin_lock(&vc->lock);
2993*4882a593Smuzhiyun 		if (is_kvmppc_resume_guest(vcpu->arch.ret)) {
2994*4882a593Smuzhiyun 			if (vcpu->arch.pending_exceptions)
2995*4882a593Smuzhiyun 				kvmppc_core_prepare_to_enter(vcpu);
2996*4882a593Smuzhiyun 			if (vcpu->arch.ceded)
2997*4882a593Smuzhiyun 				kvmppc_set_timer(vcpu);
2998*4882a593Smuzhiyun 			else
2999*4882a593Smuzhiyun 				++still_running;
3000*4882a593Smuzhiyun 		} else {
3001*4882a593Smuzhiyun 			kvmppc_remove_runnable(vc, vcpu);
3002*4882a593Smuzhiyun 			wake_up(&vcpu->arch.cpu_run);
3003*4882a593Smuzhiyun 		}
3004*4882a593Smuzhiyun 	}
3005*4882a593Smuzhiyun 	if (!is_master) {
3006*4882a593Smuzhiyun 		if (still_running > 0) {
3007*4882a593Smuzhiyun 			kvmppc_vcore_preempt(vc);
3008*4882a593Smuzhiyun 		} else if (vc->runner) {
3009*4882a593Smuzhiyun 			vc->vcore_state = VCORE_PREEMPT;
3010*4882a593Smuzhiyun 			kvmppc_core_start_stolen(vc);
3011*4882a593Smuzhiyun 		} else {
3012*4882a593Smuzhiyun 			vc->vcore_state = VCORE_INACTIVE;
3013*4882a593Smuzhiyun 		}
3014*4882a593Smuzhiyun 		if (vc->n_runnable > 0 && vc->runner == NULL) {
3015*4882a593Smuzhiyun 			/* make sure there's a candidate runner awake */
3016*4882a593Smuzhiyun 			i = -1;
3017*4882a593Smuzhiyun 			vcpu = next_runnable_thread(vc, &i);
3018*4882a593Smuzhiyun 			wake_up(&vcpu->arch.cpu_run);
3019*4882a593Smuzhiyun 		}
3020*4882a593Smuzhiyun 	}
3021*4882a593Smuzhiyun 	spin_unlock(&vc->lock);
3022*4882a593Smuzhiyun }
3023*4882a593Smuzhiyun 
3024*4882a593Smuzhiyun /*
3025*4882a593Smuzhiyun  * Clear core from the list of active host cores as we are about to
3026*4882a593Smuzhiyun  * enter the guest. Only do this if it is the primary thread of the
3027*4882a593Smuzhiyun  * core (not if a subcore) that is entering the guest.
3028*4882a593Smuzhiyun  */
3029*4882a593Smuzhiyun static inline int kvmppc_clear_host_core(unsigned int cpu)
3030*4882a593Smuzhiyun {
3031*4882a593Smuzhiyun 	int core;
3032*4882a593Smuzhiyun 
3033*4882a593Smuzhiyun 	if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
3034*4882a593Smuzhiyun 		return 0;
3035*4882a593Smuzhiyun 	/*
3036*4882a593Smuzhiyun 	 * Memory barrier can be omitted here as we will do a smp_wmb()
3037*4882a593Smuzhiyun 	 * later in kvmppc_start_thread and we need to ensure that state is
3038*4882a593Smuzhiyun 	 * visible to other CPUs only after we enter guest.
3039*4882a593Smuzhiyun 	 */
3040*4882a593Smuzhiyun 	core = cpu >> threads_shift;
3041*4882a593Smuzhiyun 	kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 0;
3042*4882a593Smuzhiyun 	return 0;
3043*4882a593Smuzhiyun }
3044*4882a593Smuzhiyun 
3045*4882a593Smuzhiyun /*
3046*4882a593Smuzhiyun  * Advertise this core as an active host core since we exited the guest.
3047*4882a593Smuzhiyun  * Only need to do this if it is the primary thread of the core that is
3048*4882a593Smuzhiyun  * exiting.
3049*4882a593Smuzhiyun  */
3050*4882a593Smuzhiyun static inline int kvmppc_set_host_core(unsigned int cpu)
3051*4882a593Smuzhiyun {
3052*4882a593Smuzhiyun 	int core;
3053*4882a593Smuzhiyun 
3054*4882a593Smuzhiyun 	if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
3055*4882a593Smuzhiyun 		return 0;
3056*4882a593Smuzhiyun 
3057*4882a593Smuzhiyun 	/*
3058*4882a593Smuzhiyun 	 * Memory barrier can be omitted here because we do a spin_unlock
3059*4882a593Smuzhiyun 	 * immediately after this which provides the memory barrier.
3060*4882a593Smuzhiyun 	 */
3061*4882a593Smuzhiyun 	core = cpu >> threads_shift;
3062*4882a593Smuzhiyun 	kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 1;
3063*4882a593Smuzhiyun 	return 0;
3064*4882a593Smuzhiyun }
3065*4882a593Smuzhiyun 
3066*4882a593Smuzhiyun static void set_irq_happened(int trap)
3067*4882a593Smuzhiyun {
3068*4882a593Smuzhiyun 	switch (trap) {
3069*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_EXTERNAL:
3070*4882a593Smuzhiyun 		local_paca->irq_happened |= PACA_IRQ_EE;
3071*4882a593Smuzhiyun 		break;
3072*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_H_DOORBELL:
3073*4882a593Smuzhiyun 		local_paca->irq_happened |= PACA_IRQ_DBELL;
3074*4882a593Smuzhiyun 		break;
3075*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_HMI:
3076*4882a593Smuzhiyun 		local_paca->irq_happened |= PACA_IRQ_HMI;
3077*4882a593Smuzhiyun 		break;
3078*4882a593Smuzhiyun 	case BOOK3S_INTERRUPT_SYSTEM_RESET:
3079*4882a593Smuzhiyun 		replay_system_reset();
3080*4882a593Smuzhiyun 		break;
3081*4882a593Smuzhiyun 	}
3082*4882a593Smuzhiyun }
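/*
 * Note (illustrative): guest exits occur with interrupts hard-disabled,
 * so the host has not yet taken the interrupt that caused the exit.
 * Recording it in local_paca->irq_happened lets the lazy interrupt
 * replay logic deliver it once interrupts are re-enabled after exit.
 */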
3083*4882a593Smuzhiyun 
3084*4882a593Smuzhiyun /*
3085*4882a593Smuzhiyun  * Run a set of guest threads on a physical core.
3086*4882a593Smuzhiyun  * Called with vc->lock held.
3087*4882a593Smuzhiyun  */
3088*4882a593Smuzhiyun static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
3089*4882a593Smuzhiyun {
3090*4882a593Smuzhiyun 	struct kvm_vcpu *vcpu;
3091*4882a593Smuzhiyun 	int i;
3092*4882a593Smuzhiyun 	int srcu_idx;
3093*4882a593Smuzhiyun 	struct core_info core_info;
3094*4882a593Smuzhiyun 	struct kvmppc_vcore *pvc;
3095*4882a593Smuzhiyun 	struct kvm_split_mode split_info, *sip;
3096*4882a593Smuzhiyun 	int split, subcore_size, active;
3097*4882a593Smuzhiyun 	int sub;
3098*4882a593Smuzhiyun 	bool thr0_done;
3099*4882a593Smuzhiyun 	unsigned long cmd_bit, stat_bit;
3100*4882a593Smuzhiyun 	int pcpu, thr;
3101*4882a593Smuzhiyun 	int target_threads;
3102*4882a593Smuzhiyun 	int controlled_threads;
3103*4882a593Smuzhiyun 	int trap;
3104*4882a593Smuzhiyun 	bool is_power8;
3105*4882a593Smuzhiyun 	bool hpt_on_radix;
3106*4882a593Smuzhiyun 
3107*4882a593Smuzhiyun 	/*
3108*4882a593Smuzhiyun 	 * Remove from the list any threads that have a signal pending
3109*4882a593Smuzhiyun 	 * or need a VPA update done
3110*4882a593Smuzhiyun 	 */
3111*4882a593Smuzhiyun 	prepare_threads(vc);
3112*4882a593Smuzhiyun 
3113*4882a593Smuzhiyun 	/* if the runner is no longer runnable, let the caller pick a new one */
3114*4882a593Smuzhiyun 	if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE)
3115*4882a593Smuzhiyun 		return;
3116*4882a593Smuzhiyun 
3117*4882a593Smuzhiyun 	/*
3118*4882a593Smuzhiyun 	 * Initialize *vc.
3119*4882a593Smuzhiyun 	 */
3120*4882a593Smuzhiyun 	init_vcore_to_run(vc);
3121*4882a593Smuzhiyun 	vc->preempt_tb = TB_NIL;
3122*4882a593Smuzhiyun 
3123*4882a593Smuzhiyun 	/*
3124*4882a593Smuzhiyun 	 * Number of threads that we will be controlling: the same as
3125*4882a593Smuzhiyun 	 * the number of threads per subcore, except on POWER9,
3126*4882a593Smuzhiyun 	 * where it's 1 because the threads are (mostly) independent.
3127*4882a593Smuzhiyun 	 */
3128*4882a593Smuzhiyun 	controlled_threads = threads_per_vcore(vc->kvm);
3129*4882a593Smuzhiyun 
3130*4882a593Smuzhiyun 	/*
3131*4882a593Smuzhiyun 	 * Make sure we are running on primary threads, and that secondary
3132*4882a593Smuzhiyun 	 * threads are offline.  Also check whether the number of threads in
3133*4882a593Smuzhiyun 	 * this guest is greater than the current system threads per guest.
3134*4882a593Smuzhiyun 	 * On POWER9, we must not be in independent-threads mode if
3135*4882a593Smuzhiyun 	 * this is an HPT guest on a radix host machine where the
3136*4882a593Smuzhiyun 	 * CPU threads may not be in different MMU modes.
3137*4882a593Smuzhiyun 	 */
3138*4882a593Smuzhiyun 	hpt_on_radix = no_mixing_hpt_and_radix && radix_enabled() &&
3139*4882a593Smuzhiyun 		!kvm_is_radix(vc->kvm);
3140*4882a593Smuzhiyun 	if (((controlled_threads > 1) &&
3141*4882a593Smuzhiyun 	     ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) ||
3142*4882a593Smuzhiyun 	    (hpt_on_radix && vc->kvm->arch.threads_indep)) {
3143*4882a593Smuzhiyun 		for_each_runnable_thread(i, vcpu, vc) {
3144*4882a593Smuzhiyun 			vcpu->arch.ret = -EBUSY;
3145*4882a593Smuzhiyun 			kvmppc_remove_runnable(vc, vcpu);
3146*4882a593Smuzhiyun 			wake_up(&vcpu->arch.cpu_run);
3147*4882a593Smuzhiyun 		}
3148*4882a593Smuzhiyun 		goto out;
3149*4882a593Smuzhiyun 	}
3150*4882a593Smuzhiyun 
3151*4882a593Smuzhiyun 	/*
3152*4882a593Smuzhiyun 	 * See if we could run any other vcores on the physical core
3153*4882a593Smuzhiyun 	 * along with this one.
3154*4882a593Smuzhiyun 	 */
3155*4882a593Smuzhiyun 	init_core_info(&core_info, vc);
3156*4882a593Smuzhiyun 	pcpu = smp_processor_id();
3157*4882a593Smuzhiyun 	target_threads = controlled_threads;
3158*4882a593Smuzhiyun 	if (target_smt_mode && target_smt_mode < target_threads)
3159*4882a593Smuzhiyun 		target_threads = target_smt_mode;
3160*4882a593Smuzhiyun 	if (vc->num_threads < target_threads)
3161*4882a593Smuzhiyun 		collect_piggybacks(&core_info, target_threads);
3162*4882a593Smuzhiyun 
3163*4882a593Smuzhiyun 	/*
3164*4882a593Smuzhiyun 	 * On radix, arrange for TLB flushing if necessary.
3165*4882a593Smuzhiyun 	 * This has to be done before disabling interrupts since
3166*4882a593Smuzhiyun 	 * it uses smp_call_function().
3167*4882a593Smuzhiyun 	 */
3168*4882a593Smuzhiyun 	pcpu = smp_processor_id();
3169*4882a593Smuzhiyun 	if (kvm_is_radix(vc->kvm)) {
3170*4882a593Smuzhiyun 		for (sub = 0; sub < core_info.n_subcores; ++sub)
3171*4882a593Smuzhiyun 			for_each_runnable_thread(i, vcpu, core_info.vc[sub])
3172*4882a593Smuzhiyun 				kvmppc_prepare_radix_vcpu(vcpu, pcpu);
3173*4882a593Smuzhiyun 	}
3174*4882a593Smuzhiyun 
3175*4882a593Smuzhiyun 	/*
3176*4882a593Smuzhiyun 	 * Hard-disable interrupts, and check resched flag and signals.
3177*4882a593Smuzhiyun 	 * If we need to reschedule or deliver a signal, clean up
3178*4882a593Smuzhiyun 	 * and return without going into the guest(s).
3179*4882a593Smuzhiyun 	 * If the mmu_ready flag has been cleared, don't go into the
3180*4882a593Smuzhiyun 	 * guest because that means a HPT resize operation is in progress.
3181*4882a593Smuzhiyun 	 */
3182*4882a593Smuzhiyun 	local_irq_disable();
3183*4882a593Smuzhiyun 	hard_irq_disable();
3184*4882a593Smuzhiyun 	if (lazy_irq_pending() || need_resched() ||
3185*4882a593Smuzhiyun 	    recheck_signals_and_mmu(&core_info)) {
3186*4882a593Smuzhiyun 		local_irq_enable();
3187*4882a593Smuzhiyun 		vc->vcore_state = VCORE_INACTIVE;
3188*4882a593Smuzhiyun 		/* Unlock all except the primary vcore */
3189*4882a593Smuzhiyun 		for (sub = 1; sub < core_info.n_subcores; ++sub) {
3190*4882a593Smuzhiyun 			pvc = core_info.vc[sub];
3191*4882a593Smuzhiyun 			/* Put back on to the preempted vcores list */
3192*4882a593Smuzhiyun 			kvmppc_vcore_preempt(pvc);
3193*4882a593Smuzhiyun 			spin_unlock(&pvc->lock);
3194*4882a593Smuzhiyun 		}
3195*4882a593Smuzhiyun 		for (i = 0; i < controlled_threads; ++i)
3196*4882a593Smuzhiyun 			kvmppc_release_hwthread(pcpu + i);
3197*4882a593Smuzhiyun 		return;
3198*4882a593Smuzhiyun 	}
3199*4882a593Smuzhiyun 
3200*4882a593Smuzhiyun 	kvmppc_clear_host_core(pcpu);
3201*4882a593Smuzhiyun 
3202*4882a593Smuzhiyun 	/* Decide on micro-threading (split-core) mode */
3203*4882a593Smuzhiyun 	subcore_size = threads_per_subcore;
3204*4882a593Smuzhiyun 	cmd_bit = stat_bit = 0;
3205*4882a593Smuzhiyun 	split = core_info.n_subcores;
3206*4882a593Smuzhiyun 	sip = NULL;
3207*4882a593Smuzhiyun 	is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S)
3208*4882a593Smuzhiyun 		&& !cpu_has_feature(CPU_FTR_ARCH_300);
3209*4882a593Smuzhiyun 
3210*4882a593Smuzhiyun 	if (split > 1 || hpt_on_radix) {
3211*4882a593Smuzhiyun 		sip = &split_info;
3212*4882a593Smuzhiyun 		memset(&split_info, 0, sizeof(split_info));
3213*4882a593Smuzhiyun 		for (sub = 0; sub < core_info.n_subcores; ++sub)
3214*4882a593Smuzhiyun 			split_info.vc[sub] = core_info.vc[sub];
3215*4882a593Smuzhiyun 
3216*4882a593Smuzhiyun 		if (is_power8) {
3217*4882a593Smuzhiyun 			if (split == 2 && (dynamic_mt_modes & 2)) {
3218*4882a593Smuzhiyun 				cmd_bit = HID0_POWER8_1TO2LPAR;
3219*4882a593Smuzhiyun 				stat_bit = HID0_POWER8_2LPARMODE;
3220*4882a593Smuzhiyun 			} else {
3221*4882a593Smuzhiyun 				split = 4;
3222*4882a593Smuzhiyun 				cmd_bit = HID0_POWER8_1TO4LPAR;
3223*4882a593Smuzhiyun 				stat_bit = HID0_POWER8_4LPARMODE;
3224*4882a593Smuzhiyun 			}
3225*4882a593Smuzhiyun 			subcore_size = MAX_SMT_THREADS / split;
3226*4882a593Smuzhiyun 			split_info.rpr = mfspr(SPRN_RPR);
3227*4882a593Smuzhiyun 			split_info.pmmar = mfspr(SPRN_PMMAR);
3228*4882a593Smuzhiyun 			split_info.ldbar = mfspr(SPRN_LDBAR);
3229*4882a593Smuzhiyun 			split_info.subcore_size = subcore_size;
3230*4882a593Smuzhiyun 		} else {
3231*4882a593Smuzhiyun 			split_info.subcore_size = 1;
3232*4882a593Smuzhiyun 			if (hpt_on_radix) {
3233*4882a593Smuzhiyun 				/* Use the split_info for LPCR/LPIDR changes */
3234*4882a593Smuzhiyun 				split_info.lpcr_req = vc->lpcr;
3235*4882a593Smuzhiyun 				split_info.lpidr_req = vc->kvm->arch.lpid;
3236*4882a593Smuzhiyun 				split_info.host_lpcr = vc->kvm->arch.host_lpcr;
3237*4882a593Smuzhiyun 				split_info.do_set = 1;
3238*4882a593Smuzhiyun 			}
3239*4882a593Smuzhiyun 		}
3240*4882a593Smuzhiyun 
3241*4882a593Smuzhiyun 		/* order writes to split_info before kvm_split_mode pointer */
3242*4882a593Smuzhiyun 		smp_wmb();
3243*4882a593Smuzhiyun 	}
3244*4882a593Smuzhiyun 
3245*4882a593Smuzhiyun 	for (thr = 0; thr < controlled_threads; ++thr) {
3246*4882a593Smuzhiyun 		struct paca_struct *paca = paca_ptrs[pcpu + thr];
3247*4882a593Smuzhiyun 
3248*4882a593Smuzhiyun 		paca->kvm_hstate.tid = thr;
3249*4882a593Smuzhiyun 		paca->kvm_hstate.napping = 0;
3250*4882a593Smuzhiyun 		paca->kvm_hstate.kvm_split_mode = sip;
3251*4882a593Smuzhiyun 	}
3252*4882a593Smuzhiyun 
3253*4882a593Smuzhiyun 	/* Initiate micro-threading (split-core) on POWER8 if required */
3254*4882a593Smuzhiyun 	if (cmd_bit) {
3255*4882a593Smuzhiyun 		unsigned long hid0 = mfspr(SPRN_HID0);
3256*4882a593Smuzhiyun 
3257*4882a593Smuzhiyun 		hid0 |= cmd_bit | HID0_POWER8_DYNLPARDIS;
3258*4882a593Smuzhiyun 		mb();
3259*4882a593Smuzhiyun 		mtspr(SPRN_HID0, hid0);
3260*4882a593Smuzhiyun 		isync();
3261*4882a593Smuzhiyun 		for (;;) {
3262*4882a593Smuzhiyun 			hid0 = mfspr(SPRN_HID0);
3263*4882a593Smuzhiyun 			if (hid0 & stat_bit)
3264*4882a593Smuzhiyun 				break;
3265*4882a593Smuzhiyun 			cpu_relax();
3266*4882a593Smuzhiyun 		}
3267*4882a593Smuzhiyun 	}
3268*4882a593Smuzhiyun 
3269*4882a593Smuzhiyun 	/*
3270*4882a593Smuzhiyun 	 * On POWER8, set RWMR register.
3271*4882a593Smuzhiyun 	 * Since it only affects PURR and SPURR, it doesn't affect
3272*4882a593Smuzhiyun 	 * the host, so we don't save/restore the host value.
3273*4882a593Smuzhiyun 	 */
3274*4882a593Smuzhiyun 	if (is_power8) {
3275*4882a593Smuzhiyun 		unsigned long rwmr_val = RWMR_RPA_P8_8THREAD;
3276*4882a593Smuzhiyun 		int n_online = atomic_read(&vc->online_count);
3277*4882a593Smuzhiyun 
3278*4882a593Smuzhiyun 		/*
3279*4882a593Smuzhiyun 		 * Use the 8-thread value if we're doing split-core
3280*4882a593Smuzhiyun 		 * or if the vcore's online count looks bogus.
3281*4882a593Smuzhiyun 		 */
3282*4882a593Smuzhiyun 		if (split == 1 && threads_per_subcore == MAX_SMT_THREADS &&
3283*4882a593Smuzhiyun 		    n_online >= 1 && n_online <= MAX_SMT_THREADS)
3284*4882a593Smuzhiyun 			rwmr_val = p8_rwmr_values[n_online];
3285*4882a593Smuzhiyun 		mtspr(SPRN_RWMR, rwmr_val);
3286*4882a593Smuzhiyun 	}
3287*4882a593Smuzhiyun 
3288*4882a593Smuzhiyun 	/* Start all the threads */
3289*4882a593Smuzhiyun 	active = 0;
3290*4882a593Smuzhiyun 	for (sub = 0; sub < core_info.n_subcores; ++sub) {
3291*4882a593Smuzhiyun 		thr = is_power8 ? subcore_thread_map[sub] : sub;
3292*4882a593Smuzhiyun 		thr0_done = false;
3293*4882a593Smuzhiyun 		active |= 1 << thr;
3294*4882a593Smuzhiyun 		pvc = core_info.vc[sub];
3295*4882a593Smuzhiyun 		pvc->pcpu = pcpu + thr;
3296*4882a593Smuzhiyun 		for_each_runnable_thread(i, vcpu, pvc) {
3297*4882a593Smuzhiyun 			kvmppc_start_thread(vcpu, pvc);
3298*4882a593Smuzhiyun 			kvmppc_create_dtl_entry(vcpu, pvc);
3299*4882a593Smuzhiyun 			trace_kvm_guest_enter(vcpu);
3300*4882a593Smuzhiyun 			if (!vcpu->arch.ptid)
3301*4882a593Smuzhiyun 				thr0_done = true;
3302*4882a593Smuzhiyun 			active |= 1 << (thr + vcpu->arch.ptid);
3303*4882a593Smuzhiyun 		}
3304*4882a593Smuzhiyun 		/*
3305*4882a593Smuzhiyun 		 * We need to start the first thread of each subcore
3306*4882a593Smuzhiyun 		 * even if it doesn't have a vcpu.
3307*4882a593Smuzhiyun 		 */
3308*4882a593Smuzhiyun 		if (!thr0_done)
3309*4882a593Smuzhiyun 			kvmppc_start_thread(NULL, pvc);
3310*4882a593Smuzhiyun 	}
3311*4882a593Smuzhiyun 
3312*4882a593Smuzhiyun 	/*
3313*4882a593Smuzhiyun 	 * Ensure that split_info.do_nap is set after setting
3314*4882a593Smuzhiyun 	 * the vcore pointer in the PACA of the secondaries.
3315*4882a593Smuzhiyun 	 */
3316*4882a593Smuzhiyun 	smp_mb();
3317*4882a593Smuzhiyun 
3318*4882a593Smuzhiyun 	/*
3319*4882a593Smuzhiyun 	 * When doing micro-threading, poke the inactive threads as well.
3320*4882a593Smuzhiyun 	 * This gets them to the nap instruction after kvm_do_nap,
3321*4882a593Smuzhiyun 	 * which reduces the time taken to unsplit later.
3322*4882a593Smuzhiyun 	 * For POWER9 HPT guest on radix host, we need all the secondary
3323*4882a593Smuzhiyun 	 * threads woken up so they can do the LPCR/LPIDR change.
3324*4882a593Smuzhiyun 	 */
3325*4882a593Smuzhiyun 	if (cmd_bit || hpt_on_radix) {
3326*4882a593Smuzhiyun 		split_info.do_nap = 1;	/* ask secondaries to nap when done */
3327*4882a593Smuzhiyun 		for (thr = 1; thr < threads_per_subcore; ++thr)
3328*4882a593Smuzhiyun 			if (!(active & (1 << thr)))
3329*4882a593Smuzhiyun 				kvmppc_ipi_thread(pcpu + thr);
3330*4882a593Smuzhiyun 	}
3331*4882a593Smuzhiyun 
3332*4882a593Smuzhiyun 	vc->vcore_state = VCORE_RUNNING;
3333*4882a593Smuzhiyun 	preempt_disable();
3334*4882a593Smuzhiyun 
3335*4882a593Smuzhiyun 	trace_kvmppc_run_core(vc, 0);
3336*4882a593Smuzhiyun 
3337*4882a593Smuzhiyun 	for (sub = 0; sub < core_info.n_subcores; ++sub)
3338*4882a593Smuzhiyun 		spin_unlock(&core_info.vc[sub]->lock);
3339*4882a593Smuzhiyun 
3340*4882a593Smuzhiyun 	guest_enter_irqoff();
3341*4882a593Smuzhiyun 
3342*4882a593Smuzhiyun 	srcu_idx = srcu_read_lock(&vc->kvm->srcu);
3343*4882a593Smuzhiyun 
3344*4882a593Smuzhiyun 	this_cpu_disable_ftrace();
3345*4882a593Smuzhiyun 
3346*4882a593Smuzhiyun 	/*
3347*4882a593Smuzhiyun 	 * Interrupts will be enabled once we get into the guest,
3348*4882a593Smuzhiyun 	 * so tell lockdep that we're about to enable interrupts.
3349*4882a593Smuzhiyun 	 */
3350*4882a593Smuzhiyun 	trace_hardirqs_on();
3351*4882a593Smuzhiyun 
3352*4882a593Smuzhiyun 	trap = __kvmppc_vcore_entry();
3353*4882a593Smuzhiyun 
3354*4882a593Smuzhiyun 	trace_hardirqs_off();
3355*4882a593Smuzhiyun 
3356*4882a593Smuzhiyun 	this_cpu_enable_ftrace();
3357*4882a593Smuzhiyun 
3358*4882a593Smuzhiyun 	srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
3359*4882a593Smuzhiyun 
3360*4882a593Smuzhiyun 	set_irq_happened(trap);
3361*4882a593Smuzhiyun 
3362*4882a593Smuzhiyun 	spin_lock(&vc->lock);
3363*4882a593Smuzhiyun 	/* prevent other vcpu threads from doing kvmppc_start_thread() now */
3364*4882a593Smuzhiyun 	vc->vcore_state = VCORE_EXITING;
3365*4882a593Smuzhiyun 
3366*4882a593Smuzhiyun 	/* wait for secondary threads to finish writing their state to memory */
3367*4882a593Smuzhiyun 	kvmppc_wait_for_nap(controlled_threads);
3368*4882a593Smuzhiyun 
3369*4882a593Smuzhiyun 	/* Return to whole-core mode if we split the core earlier */
3370*4882a593Smuzhiyun 	if (cmd_bit) {
3371*4882a593Smuzhiyun 		unsigned long hid0 = mfspr(SPRN_HID0);
3372*4882a593Smuzhiyun 		unsigned long loops = 0;
3373*4882a593Smuzhiyun 
3374*4882a593Smuzhiyun 		hid0 &= ~HID0_POWER8_DYNLPARDIS;
3375*4882a593Smuzhiyun 		stat_bit = HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE;
3376*4882a593Smuzhiyun 		mb();
3377*4882a593Smuzhiyun 		mtspr(SPRN_HID0, hid0);
3378*4882a593Smuzhiyun 		isync();
3379*4882a593Smuzhiyun 		for (;;) {
3380*4882a593Smuzhiyun 			hid0 = mfspr(SPRN_HID0);
3381*4882a593Smuzhiyun 			if (!(hid0 & stat_bit))
3382*4882a593Smuzhiyun 				break;
3383*4882a593Smuzhiyun 			cpu_relax();
3384*4882a593Smuzhiyun 			++loops;
3385*4882a593Smuzhiyun 		}
3386*4882a593Smuzhiyun 	} else if (hpt_on_radix) {
3387*4882a593Smuzhiyun 		/* Wait for all threads to have seen final sync */
3388*4882a593Smuzhiyun 		for (thr = 1; thr < controlled_threads; ++thr) {
3389*4882a593Smuzhiyun 			struct paca_struct *paca = paca_ptrs[pcpu + thr];
3390*4882a593Smuzhiyun 
3391*4882a593Smuzhiyun 			while (paca->kvm_hstate.kvm_split_mode) {
3392*4882a593Smuzhiyun 				HMT_low();
3393*4882a593Smuzhiyun 				barrier();
3394*4882a593Smuzhiyun 			}
3395*4882a593Smuzhiyun 			HMT_medium();
3396*4882a593Smuzhiyun 		}
3397*4882a593Smuzhiyun 	}
3398*4882a593Smuzhiyun 	split_info.do_nap = 0;
3399*4882a593Smuzhiyun 
3400*4882a593Smuzhiyun 	kvmppc_set_host_core(pcpu);
3401*4882a593Smuzhiyun 
3402*4882a593Smuzhiyun 	context_tracking_guest_exit();
3403*4882a593Smuzhiyun 	if (!vtime_accounting_enabled_this_cpu()) {
3404*4882a593Smuzhiyun 		local_irq_enable();
3405*4882a593Smuzhiyun 		/*
3406*4882a593Smuzhiyun 		 * Service IRQs here before vtime_account_guest_exit() so any
3407*4882a593Smuzhiyun 		 * ticks that occurred while running the guest are accounted to
3408*4882a593Smuzhiyun 		 * the guest. If vtime accounting is enabled, accounting uses
3409*4882a593Smuzhiyun 		 * TB rather than ticks, so it can be done without enabling
3410*4882a593Smuzhiyun 		 * interrupts here, which has the problem that it accounts
3411*4882a593Smuzhiyun 		 * interrupt processing overhead to the host.
3412*4882a593Smuzhiyun 		 */
3413*4882a593Smuzhiyun 		local_irq_disable();
3414*4882a593Smuzhiyun 	}
3415*4882a593Smuzhiyun 	vtime_account_guest_exit();
3416*4882a593Smuzhiyun 
3417*4882a593Smuzhiyun 	local_irq_enable();
3418*4882a593Smuzhiyun 
3419*4882a593Smuzhiyun 	/* Let secondaries go back to the offline loop */
3420*4882a593Smuzhiyun 	for (i = 0; i < controlled_threads; ++i) {
3421*4882a593Smuzhiyun 		kvmppc_release_hwthread(pcpu + i);
3422*4882a593Smuzhiyun 		if (sip && sip->napped[i])
3423*4882a593Smuzhiyun 			kvmppc_ipi_thread(pcpu + i);
3424*4882a593Smuzhiyun 		cpumask_clear_cpu(pcpu + i, &vc->kvm->arch.cpu_in_guest);
3425*4882a593Smuzhiyun 	}
3426*4882a593Smuzhiyun 
3427*4882a593Smuzhiyun 	spin_unlock(&vc->lock);
3428*4882a593Smuzhiyun 
3429*4882a593Smuzhiyun 	/* make sure updates to secondary vcpu structs are visible now */
3430*4882a593Smuzhiyun 	smp_mb();
3431*4882a593Smuzhiyun 
3432*4882a593Smuzhiyun 	preempt_enable();
3433*4882a593Smuzhiyun 
3434*4882a593Smuzhiyun 	for (sub = 0; sub < core_info.n_subcores; ++sub) {
3435*4882a593Smuzhiyun 		pvc = core_info.vc[sub];
3436*4882a593Smuzhiyun 		post_guest_process(pvc, pvc == vc);
3437*4882a593Smuzhiyun 	}
3438*4882a593Smuzhiyun 
3439*4882a593Smuzhiyun 	spin_lock(&vc->lock);
3440*4882a593Smuzhiyun 
3441*4882a593Smuzhiyun  out:
3442*4882a593Smuzhiyun 	vc->vcore_state = VCORE_INACTIVE;
3443*4882a593Smuzhiyun 	trace_kvmppc_run_core(vc, 1);
3444*4882a593Smuzhiyun }
3445*4882a593Smuzhiyun 
3446*4882a593Smuzhiyun /*
3447*4882a593Smuzhiyun  * Load up hypervisor-mode registers on P9.
3448*4882a593Smuzhiyun  */
3449*4882a593Smuzhiyun static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
3450*4882a593Smuzhiyun 				     unsigned long lpcr)
3451*4882a593Smuzhiyun {
3452*4882a593Smuzhiyun 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
3453*4882a593Smuzhiyun 	s64 hdec;
3454*4882a593Smuzhiyun 	u64 tb, purr, spurr;
3455*4882a593Smuzhiyun 	int trap;
3456*4882a593Smuzhiyun 	unsigned long host_hfscr = mfspr(SPRN_HFSCR);
3457*4882a593Smuzhiyun 	unsigned long host_ciabr = mfspr(SPRN_CIABR);
3458*4882a593Smuzhiyun 	unsigned long host_dawr = mfspr(SPRN_DAWR0);
3459*4882a593Smuzhiyun 	unsigned long host_dawrx = mfspr(SPRN_DAWRX0);
3460*4882a593Smuzhiyun 	unsigned long host_psscr = mfspr(SPRN_PSSCR);
3461*4882a593Smuzhiyun 	unsigned long host_pidr = mfspr(SPRN_PID);
3462*4882a593Smuzhiyun 
3463*4882a593Smuzhiyun 	/*
3464*4882a593Smuzhiyun 	 * P8 and P9 suppress the HDEC exception when LPCR[HDICE] = 0,
3465*4882a593Smuzhiyun 	 * so set HDICE before writing HDEC.
3466*4882a593Smuzhiyun 	 */
3467*4882a593Smuzhiyun 	mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr | LPCR_HDICE);
3468*4882a593Smuzhiyun 	isync();
3469*4882a593Smuzhiyun 
3470*4882a593Smuzhiyun 	hdec = time_limit - mftb();
3471*4882a593Smuzhiyun 	if (hdec < 0) {
3472*4882a593Smuzhiyun 		mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr);
3473*4882a593Smuzhiyun 		isync();
3474*4882a593Smuzhiyun 		return BOOK3S_INTERRUPT_HV_DECREMENTER;
3475*4882a593Smuzhiyun 	}
3476*4882a593Smuzhiyun 	mtspr(SPRN_HDEC, hdec);
3477*4882a593Smuzhiyun 
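	/*
	 * mtspr(TBU40) writes only the top 40 bits of the timebase; the
	 * bottom 24 bits keep ticking.  If those low bits end up behind
	 * new_tb's low bits, the timebase is about 2^24 ticks short of the
	 * target, so bump the upper bits by one (0x1000000 == 1 << 24).
	 */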
3478*4882a593Smuzhiyun 	if (vc->tb_offset) {
3479*4882a593Smuzhiyun 		u64 new_tb = mftb() + vc->tb_offset;
3480*4882a593Smuzhiyun 		mtspr(SPRN_TBU40, new_tb);
3481*4882a593Smuzhiyun 		tb = mftb();
3482*4882a593Smuzhiyun 		if ((tb & 0xffffff) < (new_tb & 0xffffff))
3483*4882a593Smuzhiyun 			mtspr(SPRN_TBU40, new_tb + 0x1000000);
3484*4882a593Smuzhiyun 		vc->tb_offset_applied = vc->tb_offset;
3485*4882a593Smuzhiyun 	}
3486*4882a593Smuzhiyun 
3487*4882a593Smuzhiyun 	if (vc->pcr)
3488*4882a593Smuzhiyun 		mtspr(SPRN_PCR, vc->pcr | PCR_MASK);
3489*4882a593Smuzhiyun 	mtspr(SPRN_DPDES, vc->dpdes);
3490*4882a593Smuzhiyun 	mtspr(SPRN_VTB, vc->vtb);
3491*4882a593Smuzhiyun 
3492*4882a593Smuzhiyun 	local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR);
3493*4882a593Smuzhiyun 	local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR);
3494*4882a593Smuzhiyun 	mtspr(SPRN_PURR, vcpu->arch.purr);
3495*4882a593Smuzhiyun 	mtspr(SPRN_SPURR, vcpu->arch.spurr);
3496*4882a593Smuzhiyun 
3497*4882a593Smuzhiyun 	if (dawr_enabled()) {
3498*4882a593Smuzhiyun 		mtspr(SPRN_DAWR0, vcpu->arch.dawr);
3499*4882a593Smuzhiyun 		mtspr(SPRN_DAWRX0, vcpu->arch.dawrx);
3500*4882a593Smuzhiyun 	}
3501*4882a593Smuzhiyun 	mtspr(SPRN_CIABR, vcpu->arch.ciabr);
3502*4882a593Smuzhiyun 	mtspr(SPRN_IC, vcpu->arch.ic);
3503*4882a593Smuzhiyun 	mtspr(SPRN_PID, vcpu->arch.pid);
3504*4882a593Smuzhiyun 
3505*4882a593Smuzhiyun 	mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
3506*4882a593Smuzhiyun 	      (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
3507*4882a593Smuzhiyun 
3508*4882a593Smuzhiyun 	mtspr(SPRN_HFSCR, vcpu->arch.hfscr);
3509*4882a593Smuzhiyun 
3510*4882a593Smuzhiyun 	mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0);
3511*4882a593Smuzhiyun 	mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1);
3512*4882a593Smuzhiyun 	mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2);
3513*4882a593Smuzhiyun 	mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3);
3514*4882a593Smuzhiyun 
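	/* All-ones AMOR: don't restrict the guest kernel's use of AMR/IAMR keys. */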
3515*4882a593Smuzhiyun 	mtspr(SPRN_AMOR, ~0UL);
3516*4882a593Smuzhiyun 
3517*4882a593Smuzhiyun 	mtspr(SPRN_LPCR, lpcr);
3518*4882a593Smuzhiyun 	isync();
3519*4882a593Smuzhiyun 
3520*4882a593Smuzhiyun 	kvmppc_xive_push_vcpu(vcpu);
3521*4882a593Smuzhiyun 
3522*4882a593Smuzhiyun 	mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
3523*4882a593Smuzhiyun 	mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);
3524*4882a593Smuzhiyun 
3525*4882a593Smuzhiyun 	trap = __kvmhv_vcpu_entry_p9(vcpu);
3526*4882a593Smuzhiyun 
3527*4882a593Smuzhiyun 	/* Advance host PURR/SPURR by the amount used by guest */
3528*4882a593Smuzhiyun 	purr = mfspr(SPRN_PURR);
3529*4882a593Smuzhiyun 	spurr = mfspr(SPRN_SPURR);
3530*4882a593Smuzhiyun 	mtspr(SPRN_PURR, local_paca->kvm_hstate.host_purr +
3531*4882a593Smuzhiyun 	      purr - vcpu->arch.purr);
3532*4882a593Smuzhiyun 	mtspr(SPRN_SPURR, local_paca->kvm_hstate.host_spurr +
3533*4882a593Smuzhiyun 	      spurr - vcpu->arch.spurr);
3534*4882a593Smuzhiyun 	vcpu->arch.purr = purr;
3535*4882a593Smuzhiyun 	vcpu->arch.spurr = spurr;
3536*4882a593Smuzhiyun 
3537*4882a593Smuzhiyun 	vcpu->arch.ic = mfspr(SPRN_IC);
3538*4882a593Smuzhiyun 	vcpu->arch.pid = mfspr(SPRN_PID);
3539*4882a593Smuzhiyun 	vcpu->arch.psscr = mfspr(SPRN_PSSCR) & PSSCR_GUEST_VIS;
3540*4882a593Smuzhiyun 
3541*4882a593Smuzhiyun 	vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0);
3542*4882a593Smuzhiyun 	vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1);
3543*4882a593Smuzhiyun 	vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
3544*4882a593Smuzhiyun 	vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);
3545*4882a593Smuzhiyun 
3546*4882a593Smuzhiyun 	/* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */
3547*4882a593Smuzhiyun 	mtspr(SPRN_PSSCR, host_psscr |
3548*4882a593Smuzhiyun 	      (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
3549*4882a593Smuzhiyun 	mtspr(SPRN_HFSCR, host_hfscr);
3550*4882a593Smuzhiyun 	mtspr(SPRN_CIABR, host_ciabr);
3551*4882a593Smuzhiyun 	mtspr(SPRN_DAWR0, host_dawr);
3552*4882a593Smuzhiyun 	mtspr(SPRN_DAWRX0, host_dawrx);
3553*4882a593Smuzhiyun 	mtspr(SPRN_PID, host_pidr);
3554*4882a593Smuzhiyun 
3555*4882a593Smuzhiyun 	/*
3556*4882a593Smuzhiyun 	 * Since this is radix, do an eieio; tlbsync; ptesync sequence in
3557*4882a593Smuzhiyun 	 * case we interrupted the guest between a tlbie and a ptesync.
3558*4882a593Smuzhiyun 	 */
3559*4882a593Smuzhiyun 	asm volatile("eieio; tlbsync; ptesync");
3560*4882a593Smuzhiyun 
3561*4882a593Smuzhiyun 	/*
3562*4882a593Smuzhiyun 	 * cp_abort is required if the processor supports local copy-paste
3563*4882a593Smuzhiyun 	 * to clear the copy buffer that was under control of the guest.
3564*4882a593Smuzhiyun 	 */
3565*4882a593Smuzhiyun 	if (cpu_has_feature(CPU_FTR_ARCH_31))
3566*4882a593Smuzhiyun 		asm volatile(PPC_CP_ABORT);
3567*4882a593Smuzhiyun 
3568*4882a593Smuzhiyun 	mtspr(SPRN_LPID, vcpu->kvm->arch.host_lpid);	/* restore host LPID */
3569*4882a593Smuzhiyun 	isync();
3570*4882a593Smuzhiyun 
3571*4882a593Smuzhiyun 	vc->dpdes = mfspr(SPRN_DPDES);
3572*4882a593Smuzhiyun 	vc->vtb = mfspr(SPRN_VTB);
3573*4882a593Smuzhiyun 	mtspr(SPRN_DPDES, 0);
3574*4882a593Smuzhiyun 	if (vc->pcr)
3575*4882a593Smuzhiyun 		mtspr(SPRN_PCR, PCR_MASK);
3576*4882a593Smuzhiyun 
3577*4882a593Smuzhiyun 	if (vc->tb_offset_applied) {
3578*4882a593Smuzhiyun 		u64 new_tb = mftb() - vc->tb_offset_applied;
3579*4882a593Smuzhiyun 		mtspr(SPRN_TBU40, new_tb);
3580*4882a593Smuzhiyun 		tb = mftb();
3581*4882a593Smuzhiyun 		if ((tb & 0xffffff) < (new_tb & 0xffffff))
3582*4882a593Smuzhiyun 			mtspr(SPRN_TBU40, new_tb + 0x1000000);
3583*4882a593Smuzhiyun 		vc->tb_offset_applied = 0;
3584*4882a593Smuzhiyun 	}
3585*4882a593Smuzhiyun 
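	/*
	 * Park HDEC at a large positive value so a stray hypervisor
	 * decrementer can't fire before the host reprograms it.
	 */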
3586*4882a593Smuzhiyun 	mtspr(SPRN_HDEC, 0x7fffffff);
3587*4882a593Smuzhiyun 	mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr);
3588*4882a593Smuzhiyun 
3589*4882a593Smuzhiyun 	return trap;
3590*4882a593Smuzhiyun }
3591*4882a593Smuzhiyun 
3592*4882a593Smuzhiyun /*
3593*4882a593Smuzhiyun  * Virtual-mode guest entry for POWER9 and later when the host and
3594*4882a593Smuzhiyun  * guest are both using the radix MMU.  The LPIDR has already been set.
3595*4882a593Smuzhiyun  */
3596*4882a593Smuzhiyun static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
3597*4882a593Smuzhiyun 			 unsigned long lpcr)
3598*4882a593Smuzhiyun {
3599*4882a593Smuzhiyun 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
3600*4882a593Smuzhiyun 	unsigned long host_dscr = mfspr(SPRN_DSCR);
3601*4882a593Smuzhiyun 	unsigned long host_tidr = mfspr(SPRN_TIDR);
3602*4882a593Smuzhiyun 	unsigned long host_iamr = mfspr(SPRN_IAMR);
3603*4882a593Smuzhiyun 	unsigned long host_amr = mfspr(SPRN_AMR);
3604*4882a593Smuzhiyun 	unsigned long host_fscr = mfspr(SPRN_FSCR);
3605*4882a593Smuzhiyun 	s64 dec;
3606*4882a593Smuzhiyun 	u64 tb;
3607*4882a593Smuzhiyun 	int trap, save_pmu;
3608*4882a593Smuzhiyun 
3609*4882a593Smuzhiyun 	dec = mfspr(SPRN_DEC);
3610*4882a593Smuzhiyun 	tb = mftb();
3611*4882a593Smuzhiyun 	if (dec < 0)
3612*4882a593Smuzhiyun 		return BOOK3S_INTERRUPT_HV_DECREMENTER;
3613*4882a593Smuzhiyun 	local_paca->kvm_hstate.dec_expires = dec + tb;
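	/* Don't run past the guest DEC expiry, even if given a longer slice. */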
3614*4882a593Smuzhiyun 	if (local_paca->kvm_hstate.dec_expires < time_limit)
3615*4882a593Smuzhiyun 		time_limit = local_paca->kvm_hstate.dec_expires;
3616*4882a593Smuzhiyun 
3617*4882a593Smuzhiyun 	vcpu->arch.ceded = 0;
3618*4882a593Smuzhiyun 
3619*4882a593Smuzhiyun 	kvmhv_save_host_pmu();		/* saves it to PACA kvm_hstate */
3620*4882a593Smuzhiyun 
3621*4882a593Smuzhiyun 	kvmppc_subcore_enter_guest();
3622*4882a593Smuzhiyun 
3623*4882a593Smuzhiyun 	vc->entry_exit_map = 1;
3624*4882a593Smuzhiyun 	vc->in_guest = 1;
3625*4882a593Smuzhiyun 
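	/*
	 * Bump the VPA yield count on the way in (and again on the way out
	 * below); paravirtualized guest code uses its parity to tell
	 * whether the vcpu is currently dispatched.
	 */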
3626*4882a593Smuzhiyun 	if (vcpu->arch.vpa.pinned_addr) {
3627*4882a593Smuzhiyun 		struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
3628*4882a593Smuzhiyun 		u32 yield_count = be32_to_cpu(lp->yield_count) + 1;
3629*4882a593Smuzhiyun 		lp->yield_count = cpu_to_be32(yield_count);
3630*4882a593Smuzhiyun 		vcpu->arch.vpa.dirty = 1;
3631*4882a593Smuzhiyun 	}
3632*4882a593Smuzhiyun 
3633*4882a593Smuzhiyun 	if (cpu_has_feature(CPU_FTR_TM) ||
3634*4882a593Smuzhiyun 	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
3635*4882a593Smuzhiyun 		kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
3636*4882a593Smuzhiyun 
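	/*
	 * When running as a nested (L1) hypervisor, mirror the guest's
	 * pmcregs_in_use flag into our own lppaca so the L0 hypervisor
	 * knows whether it has to context-switch the PMU for us.
	 */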
3637*4882a593Smuzhiyun #ifdef CONFIG_PPC_PSERIES
3638*4882a593Smuzhiyun 	if (kvmhv_on_pseries()) {
3639*4882a593Smuzhiyun 		barrier();
3640*4882a593Smuzhiyun 		if (vcpu->arch.vpa.pinned_addr) {
3641*4882a593Smuzhiyun 			struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
3642*4882a593Smuzhiyun 			get_lppaca()->pmcregs_in_use = lp->pmcregs_in_use;
3643*4882a593Smuzhiyun 		} else {
3644*4882a593Smuzhiyun 			get_lppaca()->pmcregs_in_use = 1;
3645*4882a593Smuzhiyun 		}
3646*4882a593Smuzhiyun 		barrier();
3647*4882a593Smuzhiyun 	}
3648*4882a593Smuzhiyun #endif
3649*4882a593Smuzhiyun 	kvmhv_load_guest_pmu(vcpu);
3650*4882a593Smuzhiyun 
3651*4882a593Smuzhiyun 	msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
3652*4882a593Smuzhiyun 	load_fp_state(&vcpu->arch.fp);
3653*4882a593Smuzhiyun #ifdef CONFIG_ALTIVEC
3654*4882a593Smuzhiyun 	load_vr_state(&vcpu->arch.vr);
3655*4882a593Smuzhiyun #endif
3656*4882a593Smuzhiyun 	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
3657*4882a593Smuzhiyun 
3658*4882a593Smuzhiyun 	mtspr(SPRN_DSCR, vcpu->arch.dscr);
3659*4882a593Smuzhiyun 	mtspr(SPRN_IAMR, vcpu->arch.iamr);
3660*4882a593Smuzhiyun 	mtspr(SPRN_PSPB, vcpu->arch.pspb);
3661*4882a593Smuzhiyun 	mtspr(SPRN_FSCR, vcpu->arch.fscr);
3662*4882a593Smuzhiyun 	mtspr(SPRN_TAR, vcpu->arch.tar);
3663*4882a593Smuzhiyun 	mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
3664*4882a593Smuzhiyun 	mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
3665*4882a593Smuzhiyun 	mtspr(SPRN_BESCR, vcpu->arch.bescr);
3666*4882a593Smuzhiyun 	mtspr(SPRN_WORT, vcpu->arch.wort);
3667*4882a593Smuzhiyun 	mtspr(SPRN_TIDR, vcpu->arch.tid);
3668*4882a593Smuzhiyun 	mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
3669*4882a593Smuzhiyun 	mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
3670*4882a593Smuzhiyun 	mtspr(SPRN_AMR, vcpu->arch.amr);
3671*4882a593Smuzhiyun 	mtspr(SPRN_UAMOR, vcpu->arch.uamor);
3672*4882a593Smuzhiyun 
3673*4882a593Smuzhiyun 	if (!(vcpu->arch.ctrl & 1))
3674*4882a593Smuzhiyun 		mtspr(SPRN_CTRLT, mfspr(SPRN_CTRLF) & ~1);
3675*4882a593Smuzhiyun 
3676*4882a593Smuzhiyun 	mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb());
3677*4882a593Smuzhiyun 
3678*4882a593Smuzhiyun 	if (kvmhv_on_pseries()) {
3679*4882a593Smuzhiyun 		/*
3680*4882a593Smuzhiyun 		 * We need to save and restore the guest visible part of the
3681*4882a593Smuzhiyun 		 * psscr (i.e. using SPRN_PSSCR_PR) since the hypervisor
3682*4882a593Smuzhiyun 		 * doesn't do this for us. Note only required if pseries since
3683*4882a593Smuzhiyun 		 * doesn't do this for us. Note this is only required on pseries since
3684*4882a593Smuzhiyun 		 */
3685*4882a593Smuzhiyun 		unsigned long host_psscr;
3686*4882a593Smuzhiyun 		/* call our hypervisor to load up HV regs and go */
3687*4882a593Smuzhiyun 		struct hv_guest_state hvregs;
3688*4882a593Smuzhiyun 
3689*4882a593Smuzhiyun 		host_psscr = mfspr(SPRN_PSSCR_PR);
3690*4882a593Smuzhiyun 		mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
3691*4882a593Smuzhiyun 		kvmhv_save_hv_regs(vcpu, &hvregs);
3692*4882a593Smuzhiyun 		hvregs.lpcr = lpcr;
3693*4882a593Smuzhiyun 		vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
3694*4882a593Smuzhiyun 		hvregs.version = HV_GUEST_STATE_VERSION;
3695*4882a593Smuzhiyun 		if (vcpu->arch.nested) {
3696*4882a593Smuzhiyun 			hvregs.lpid = vcpu->arch.nested->shadow_lpid;
3697*4882a593Smuzhiyun 			hvregs.vcpu_token = vcpu->arch.nested_vcpu_id;
3698*4882a593Smuzhiyun 		} else {
3699*4882a593Smuzhiyun 			hvregs.lpid = vcpu->kvm->arch.lpid;
3700*4882a593Smuzhiyun 			hvregs.vcpu_token = vcpu->vcpu_id;
3701*4882a593Smuzhiyun 		}
3702*4882a593Smuzhiyun 		hvregs.hdec_expiry = time_limit;
3703*4882a593Smuzhiyun 		trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
3704*4882a593Smuzhiyun 					  __pa(&vcpu->arch.regs));
3705*4882a593Smuzhiyun 		kvmhv_restore_hv_return_state(vcpu, &hvregs);
3706*4882a593Smuzhiyun 		vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
3707*4882a593Smuzhiyun 		vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
3708*4882a593Smuzhiyun 		vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
3709*4882a593Smuzhiyun 		vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);
3710*4882a593Smuzhiyun 		mtspr(SPRN_PSSCR_PR, host_psscr);
3711*4882a593Smuzhiyun 
3712*4882a593Smuzhiyun 		/* H_CEDE has to be handled now, not later */
3713*4882a593Smuzhiyun 		if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
3714*4882a593Smuzhiyun 		    kvmppc_get_gpr(vcpu, 3) == H_CEDE) {
3715*4882a593Smuzhiyun 			kvmppc_nested_cede(vcpu);
3716*4882a593Smuzhiyun 			kvmppc_set_gpr(vcpu, 3, 0);
3717*4882a593Smuzhiyun 			trap = 0;
3718*4882a593Smuzhiyun 		}
3719*4882a593Smuzhiyun 	} else {
3720*4882a593Smuzhiyun 		trap = kvmhv_load_hv_regs_and_go(vcpu, time_limit, lpcr);
3721*4882a593Smuzhiyun 	}
3722*4882a593Smuzhiyun 
3723*4882a593Smuzhiyun 	vcpu->arch.slb_max = 0;
3724*4882a593Smuzhiyun 	dec = mfspr(SPRN_DEC);
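	/*
	 * Without LPCR_LD the DEC is 32 bits, so e.g. a reading of
	 * 0xfffffff0 means -16 ticks rather than a huge positive count,
	 * hence the sign extension below.
	 */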
3725*4882a593Smuzhiyun 	if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
3726*4882a593Smuzhiyun 		dec = (s32) dec;
3727*4882a593Smuzhiyun 	tb = mftb();
3728*4882a593Smuzhiyun 	vcpu->arch.dec_expires = dec + tb;
3729*4882a593Smuzhiyun 	vcpu->cpu = -1;
3730*4882a593Smuzhiyun 	vcpu->arch.thread_cpu = -1;
3731*4882a593Smuzhiyun 	/* Save guest CTRL register, set runlatch to 1 */
3732*4882a593Smuzhiyun 	vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
3733*4882a593Smuzhiyun 	if (!(vcpu->arch.ctrl & 1))
3734*4882a593Smuzhiyun 		mtspr(SPRN_CTRLT, vcpu->arch.ctrl | 1);
3735*4882a593Smuzhiyun 
3736*4882a593Smuzhiyun 	vcpu->arch.iamr = mfspr(SPRN_IAMR);
3737*4882a593Smuzhiyun 	vcpu->arch.pspb = mfspr(SPRN_PSPB);
3738*4882a593Smuzhiyun 	vcpu->arch.fscr = mfspr(SPRN_FSCR);
3739*4882a593Smuzhiyun 	vcpu->arch.tar = mfspr(SPRN_TAR);
3740*4882a593Smuzhiyun 	vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
3741*4882a593Smuzhiyun 	vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
3742*4882a593Smuzhiyun 	vcpu->arch.bescr = mfspr(SPRN_BESCR);
3743*4882a593Smuzhiyun 	vcpu->arch.wort = mfspr(SPRN_WORT);
3744*4882a593Smuzhiyun 	vcpu->arch.tid = mfspr(SPRN_TIDR);
3745*4882a593Smuzhiyun 	vcpu->arch.amr = mfspr(SPRN_AMR);
3746*4882a593Smuzhiyun 	vcpu->arch.uamor = mfspr(SPRN_UAMOR);
3747*4882a593Smuzhiyun 	vcpu->arch.dscr = mfspr(SPRN_DSCR);
3748*4882a593Smuzhiyun 
3749*4882a593Smuzhiyun 	mtspr(SPRN_PSPB, 0);
3750*4882a593Smuzhiyun 	mtspr(SPRN_WORT, 0);
3751*4882a593Smuzhiyun 	mtspr(SPRN_UAMOR, 0);
3752*4882a593Smuzhiyun 	mtspr(SPRN_DSCR, host_dscr);
3753*4882a593Smuzhiyun 	mtspr(SPRN_TIDR, host_tidr);
3754*4882a593Smuzhiyun 	mtspr(SPRN_IAMR, host_iamr);
3755*4882a593Smuzhiyun 	mtspr(SPRN_PSPB, 0);
3756*4882a593Smuzhiyun 
3757*4882a593Smuzhiyun 	if (host_amr != vcpu->arch.amr)
3758*4882a593Smuzhiyun 		mtspr(SPRN_AMR, host_amr);
3759*4882a593Smuzhiyun 
3760*4882a593Smuzhiyun 	if (host_fscr != vcpu->arch.fscr)
3761*4882a593Smuzhiyun 		mtspr(SPRN_FSCR, host_fscr);
3762*4882a593Smuzhiyun 
3763*4882a593Smuzhiyun 	msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
3764*4882a593Smuzhiyun 	store_fp_state(&vcpu->arch.fp);
3765*4882a593Smuzhiyun #ifdef CONFIG_ALTIVEC
3766*4882a593Smuzhiyun 	store_vr_state(&vcpu->arch.vr);
3767*4882a593Smuzhiyun #endif
3768*4882a593Smuzhiyun 	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
3769*4882a593Smuzhiyun 
3770*4882a593Smuzhiyun 	if (cpu_has_feature(CPU_FTR_TM) ||
3771*4882a593Smuzhiyun 	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
3772*4882a593Smuzhiyun 		kvmppc_save_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
3773*4882a593Smuzhiyun 
3774*4882a593Smuzhiyun 	save_pmu = 1;
3775*4882a593Smuzhiyun 	if (vcpu->arch.vpa.pinned_addr) {
3776*4882a593Smuzhiyun 		struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
3777*4882a593Smuzhiyun 		u32 yield_count = be32_to_cpu(lp->yield_count) + 1;
3778*4882a593Smuzhiyun 		lp->yield_count = cpu_to_be32(yield_count);
3779*4882a593Smuzhiyun 		vcpu->arch.vpa.dirty = 1;
3780*4882a593Smuzhiyun 		save_pmu = lp->pmcregs_in_use;
3781*4882a593Smuzhiyun 	}
3782*4882a593Smuzhiyun 	/* Must save pmu if this guest is capable of running nested guests */
3783*4882a593Smuzhiyun 	save_pmu |= nesting_enabled(vcpu->kvm);
3784*4882a593Smuzhiyun 
3785*4882a593Smuzhiyun 	kvmhv_save_guest_pmu(vcpu, save_pmu);
3786*4882a593Smuzhiyun #ifdef CONFIG_PPC_PSERIES
3787*4882a593Smuzhiyun 	if (kvmhv_on_pseries()) {
3788*4882a593Smuzhiyun 		barrier();
3789*4882a593Smuzhiyun 		get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
3790*4882a593Smuzhiyun 		barrier();
3791*4882a593Smuzhiyun 	}
3792*4882a593Smuzhiyun #endif
3793*4882a593Smuzhiyun 
3794*4882a593Smuzhiyun 	vc->entry_exit_map = 0x101;
3795*4882a593Smuzhiyun 	vc->in_guest = 0;
3796*4882a593Smuzhiyun 
3797*4882a593Smuzhiyun 	mtspr(SPRN_DEC, local_paca->kvm_hstate.dec_expires - mftb());
3798*4882a593Smuzhiyun 	mtspr(SPRN_SPRG_VDSO_WRITE, local_paca->sprg_vdso);
3799*4882a593Smuzhiyun 
3800*4882a593Smuzhiyun 	kvmhv_load_host_pmu();
3801*4882a593Smuzhiyun 
3802*4882a593Smuzhiyun 	kvmppc_subcore_exit_guest();
3803*4882a593Smuzhiyun 
3804*4882a593Smuzhiyun 	return trap;
3805*4882a593Smuzhiyun }
3806*4882a593Smuzhiyun 
3807*4882a593Smuzhiyun /*
3808*4882a593Smuzhiyun  * Wait for some other vcpu thread to execute us, and
3809*4882a593Smuzhiyun  * wake us up when we need to handle something in the host.
3810*4882a593Smuzhiyun  */
3811*4882a593Smuzhiyun static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc,
3812*4882a593Smuzhiyun 				 struct kvm_vcpu *vcpu, int wait_state)
3813*4882a593Smuzhiyun {
3814*4882a593Smuzhiyun 	DEFINE_WAIT(wait);
3815*4882a593Smuzhiyun 
3816*4882a593Smuzhiyun 	prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
3817*4882a593Smuzhiyun 	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
3818*4882a593Smuzhiyun 		spin_unlock(&vc->lock);
3819*4882a593Smuzhiyun 		schedule();
3820*4882a593Smuzhiyun 		spin_lock(&vc->lock);
3821*4882a593Smuzhiyun 	}
3822*4882a593Smuzhiyun 	finish_wait(&vcpu->arch.cpu_run, &wait);
3823*4882a593Smuzhiyun }
3824*4882a593Smuzhiyun 
3825*4882a593Smuzhiyun static void grow_halt_poll_ns(struct kvmppc_vcore *vc)
3826*4882a593Smuzhiyun {
3827*4882a593Smuzhiyun 	if (!halt_poll_ns_grow)
3828*4882a593Smuzhiyun 		return;
3829*4882a593Smuzhiyun 
3830*4882a593Smuzhiyun 	vc->halt_poll_ns *= halt_poll_ns_grow;
3831*4882a593Smuzhiyun 	if (vc->halt_poll_ns < halt_poll_ns_grow_start)
3832*4882a593Smuzhiyun 		vc->halt_poll_ns = halt_poll_ns_grow_start;
3833*4882a593Smuzhiyun }
3834*4882a593Smuzhiyun 
3835*4882a593Smuzhiyun static void shrink_halt_poll_ns(struct kvmppc_vcore *vc)
3836*4882a593Smuzhiyun {
3837*4882a593Smuzhiyun 	if (halt_poll_ns_shrink == 0)
3838*4882a593Smuzhiyun 		vc->halt_poll_ns = 0;
3839*4882a593Smuzhiyun 	else
3840*4882a593Smuzhiyun 		vc->halt_poll_ns /= halt_poll_ns_shrink;
3841*4882a593Smuzhiyun }
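/*
 * Together these give exponential back-off/ramp-up of the poll window:
 * with, say, halt_poll_ns_grow == 2 and halt_poll_ns_grow_start == 10000,
 * the window steps 10us, 20us, 40us, ... and is capped at halt_poll_ns,
 * while a zero halt_poll_ns_shrink drops straight back to no polling.
 */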
3842*4882a593Smuzhiyun 
3843*4882a593Smuzhiyun #ifdef CONFIG_KVM_XICS
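/*
 * On XIVE, an interrupt is pending when the pending-interrupt priority
 * (PIPR) is more favored (numerically lower) than the current processor
 * priority (CPPR) in the saved context.
 */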
3844*4882a593Smuzhiyun static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu)
3845*4882a593Smuzhiyun {
3846*4882a593Smuzhiyun 	if (!xics_on_xive())
3847*4882a593Smuzhiyun 		return false;
3848*4882a593Smuzhiyun 	return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr <
3849*4882a593Smuzhiyun 		vcpu->arch.xive_saved_state.cppr;
3850*4882a593Smuzhiyun }
3851*4882a593Smuzhiyun #else
3852*4882a593Smuzhiyun static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu)
3853*4882a593Smuzhiyun {
3854*4882a593Smuzhiyun 	return false;
3855*4882a593Smuzhiyun }
3856*4882a593Smuzhiyun #endif /* CONFIG_KVM_XICS */
3857*4882a593Smuzhiyun 
3858*4882a593Smuzhiyun static bool kvmppc_vcpu_woken(struct kvm_vcpu *vcpu)
3859*4882a593Smuzhiyun {
3860*4882a593Smuzhiyun 	if (vcpu->arch.pending_exceptions || vcpu->arch.prodded ||
3861*4882a593Smuzhiyun 	    kvmppc_doorbell_pending(vcpu) || xive_interrupt_pending(vcpu))
3862*4882a593Smuzhiyun 		return true;
3863*4882a593Smuzhiyun 
3864*4882a593Smuzhiyun 	return false;
3865*4882a593Smuzhiyun }
3866*4882a593Smuzhiyun 
3867*4882a593Smuzhiyun /*
3868*4882a593Smuzhiyun  * Check to see if any of the runnable vcpus on the vcore have pending
3869*4882a593Smuzhiyun  * exceptions or are no longer ceded
3870*4882a593Smuzhiyun  */
3871*4882a593Smuzhiyun static int kvmppc_vcore_check_block(struct kvmppc_vcore *vc)
3872*4882a593Smuzhiyun {
3873*4882a593Smuzhiyun 	struct kvm_vcpu *vcpu;
3874*4882a593Smuzhiyun 	int i;
3875*4882a593Smuzhiyun 
3876*4882a593Smuzhiyun 	for_each_runnable_thread(i, vcpu, vc) {
3877*4882a593Smuzhiyun 		if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu))
3878*4882a593Smuzhiyun 			return 1;
3879*4882a593Smuzhiyun 	}
3880*4882a593Smuzhiyun 
3881*4882a593Smuzhiyun 	return 0;
3882*4882a593Smuzhiyun }
3883*4882a593Smuzhiyun 
3884*4882a593Smuzhiyun /*
3885*4882a593Smuzhiyun  * All the vcpus in this vcore are idle, so wait for a decrementer
3886*4882a593Smuzhiyun  * or external interrupt to one of the vcpus.  vc->lock is held.
3887*4882a593Smuzhiyun  */
3888*4882a593Smuzhiyun static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
3889*4882a593Smuzhiyun {
3890*4882a593Smuzhiyun 	ktime_t cur, start_poll, start_wait;
3891*4882a593Smuzhiyun 	int do_sleep = 1;
3892*4882a593Smuzhiyun 	u64 block_ns;
3893*4882a593Smuzhiyun 
3894*4882a593Smuzhiyun 	/* Poll for pending exceptions and ceded state */
3895*4882a593Smuzhiyun 	cur = start_poll = ktime_get();
3896*4882a593Smuzhiyun 	if (vc->halt_poll_ns) {
3897*4882a593Smuzhiyun 		ktime_t stop = ktime_add_ns(start_poll, vc->halt_poll_ns);
3898*4882a593Smuzhiyun 		++vc->runner->stat.halt_attempted_poll;
3899*4882a593Smuzhiyun 
3900*4882a593Smuzhiyun 		vc->vcore_state = VCORE_POLLING;
3901*4882a593Smuzhiyun 		spin_unlock(&vc->lock);
3902*4882a593Smuzhiyun 
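		/*
		 * Polling only pays off while nothing else wants this CPU;
		 * single_task_running() makes us bail as soon as another
		 * task becomes runnable.
		 */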
3903*4882a593Smuzhiyun 		do {
3904*4882a593Smuzhiyun 			if (kvmppc_vcore_check_block(vc)) {
3905*4882a593Smuzhiyun 				do_sleep = 0;
3906*4882a593Smuzhiyun 				break;
3907*4882a593Smuzhiyun 			}
3908*4882a593Smuzhiyun 			cur = ktime_get();
3909*4882a593Smuzhiyun 		} while (single_task_running() && ktime_before(cur, stop));
3910*4882a593Smuzhiyun 
3911*4882a593Smuzhiyun 		spin_lock(&vc->lock);
3912*4882a593Smuzhiyun 		vc->vcore_state = VCORE_INACTIVE;
3913*4882a593Smuzhiyun 
3914*4882a593Smuzhiyun 		if (!do_sleep) {
3915*4882a593Smuzhiyun 			++vc->runner->stat.halt_successful_poll;
3916*4882a593Smuzhiyun 			goto out;
3917*4882a593Smuzhiyun 		}
3918*4882a593Smuzhiyun 	}
3919*4882a593Smuzhiyun 
3920*4882a593Smuzhiyun 	prepare_to_rcuwait(&vc->wait);
3921*4882a593Smuzhiyun 	set_current_state(TASK_INTERRUPTIBLE);
3922*4882a593Smuzhiyun 	if (kvmppc_vcore_check_block(vc)) {
3923*4882a593Smuzhiyun 		finish_rcuwait(&vc->wait);
3924*4882a593Smuzhiyun 		do_sleep = 0;
3925*4882a593Smuzhiyun 		/* If we polled, count this as a successful poll */
3926*4882a593Smuzhiyun 		if (vc->halt_poll_ns)
3927*4882a593Smuzhiyun 			++vc->runner->stat.halt_successful_poll;
3928*4882a593Smuzhiyun 		goto out;
3929*4882a593Smuzhiyun 	}
3930*4882a593Smuzhiyun 
3931*4882a593Smuzhiyun 	start_wait = ktime_get();
3932*4882a593Smuzhiyun 
3933*4882a593Smuzhiyun 	vc->vcore_state = VCORE_SLEEPING;
3934*4882a593Smuzhiyun 	trace_kvmppc_vcore_blocked(vc, 0);
3935*4882a593Smuzhiyun 	spin_unlock(&vc->lock);
3936*4882a593Smuzhiyun 	schedule();
3937*4882a593Smuzhiyun 	finish_rcuwait(&vc->wait);
3938*4882a593Smuzhiyun 	spin_lock(&vc->lock);
3939*4882a593Smuzhiyun 	vc->vcore_state = VCORE_INACTIVE;
3940*4882a593Smuzhiyun 	trace_kvmppc_vcore_blocked(vc, 1);
3941*4882a593Smuzhiyun 	++vc->runner->stat.halt_successful_wait;
3942*4882a593Smuzhiyun 
3943*4882a593Smuzhiyun 	cur = ktime_get();
3944*4882a593Smuzhiyun 
3945*4882a593Smuzhiyun out:
3946*4882a593Smuzhiyun 	block_ns = ktime_to_ns(cur) - ktime_to_ns(start_poll);
3947*4882a593Smuzhiyun 
3948*4882a593Smuzhiyun 	/* Attribute wait time */
3949*4882a593Smuzhiyun 	if (do_sleep) {
3950*4882a593Smuzhiyun 		vc->runner->stat.halt_wait_ns +=
3951*4882a593Smuzhiyun 			ktime_to_ns(cur) - ktime_to_ns(start_wait);
3952*4882a593Smuzhiyun 		/* Attribute failed poll time */
3953*4882a593Smuzhiyun 		if (vc->halt_poll_ns)
3954*4882a593Smuzhiyun 			vc->runner->stat.halt_poll_fail_ns +=
3955*4882a593Smuzhiyun 				ktime_to_ns(start_wait) -
3956*4882a593Smuzhiyun 				ktime_to_ns(start_poll);
3957*4882a593Smuzhiyun 	} else {
3958*4882a593Smuzhiyun 		/* Attribute successful poll time */
3959*4882a593Smuzhiyun 		if (vc->halt_poll_ns)
3960*4882a593Smuzhiyun 			vc->runner->stat.halt_poll_success_ns +=
3961*4882a593Smuzhiyun 				ktime_to_ns(cur) -
3962*4882a593Smuzhiyun 				ktime_to_ns(start_poll);
3963*4882a593Smuzhiyun 	}
3964*4882a593Smuzhiyun 
3965*4882a593Smuzhiyun 	/* Adjust poll time */
3966*4882a593Smuzhiyun 	if (halt_poll_ns) {
3967*4882a593Smuzhiyun 		if (block_ns <= vc->halt_poll_ns)
3968*4882a593Smuzhiyun 			;
3969*4882a593Smuzhiyun 		/* We slept and blocked for longer than the max halt time */
3970*4882a593Smuzhiyun 		else if (vc->halt_poll_ns && block_ns > halt_poll_ns)
3971*4882a593Smuzhiyun 			shrink_halt_poll_ns(vc);
3972*4882a593Smuzhiyun 		/* We slept and our poll time is too small */
3973*4882a593Smuzhiyun 		else if (vc->halt_poll_ns < halt_poll_ns &&
3974*4882a593Smuzhiyun 				block_ns < halt_poll_ns)
3975*4882a593Smuzhiyun 			grow_halt_poll_ns(vc);
3976*4882a593Smuzhiyun 		if (vc->halt_poll_ns > halt_poll_ns)
3977*4882a593Smuzhiyun 			vc->halt_poll_ns = halt_poll_ns;
3978*4882a593Smuzhiyun 	} else
3979*4882a593Smuzhiyun 		vc->halt_poll_ns = 0;
3980*4882a593Smuzhiyun 
3981*4882a593Smuzhiyun 	trace_kvmppc_vcore_wakeup(do_sleep, block_ns);
3982*4882a593Smuzhiyun }
3983*4882a593Smuzhiyun 
3984*4882a593Smuzhiyun /*
3985*4882a593Smuzhiyun  * This never fails for a radix guest, as none of the operations it does
3986*4882a593Smuzhiyun  * for a radix guest can fail or have a way to report failure.
3987*4882a593Smuzhiyun  * kvmhv_run_single_vcpu() relies on this fact.
3988*4882a593Smuzhiyun  */
3989*4882a593Smuzhiyun static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
3990*4882a593Smuzhiyun {
3991*4882a593Smuzhiyun 	int r = 0;
3992*4882a593Smuzhiyun 	struct kvm *kvm = vcpu->kvm;
3993*4882a593Smuzhiyun 
3994*4882a593Smuzhiyun 	mutex_lock(&kvm->arch.mmu_setup_lock);
3995*4882a593Smuzhiyun 	if (!kvm->arch.mmu_ready) {
3996*4882a593Smuzhiyun 		if (!kvm_is_radix(kvm))
3997*4882a593Smuzhiyun 			r = kvmppc_hv_setup_htab_rma(vcpu);
3998*4882a593Smuzhiyun 		if (!r) {
3999*4882a593Smuzhiyun 			if (cpu_has_feature(CPU_FTR_ARCH_300))
4000*4882a593Smuzhiyun 				kvmppc_setup_partition_table(kvm);
4001*4882a593Smuzhiyun 			kvm->arch.mmu_ready = 1;
4002*4882a593Smuzhiyun 		}
4003*4882a593Smuzhiyun 	}
4004*4882a593Smuzhiyun 	mutex_unlock(&kvm->arch.mmu_setup_lock);
4005*4882a593Smuzhiyun 	return r;
4006*4882a593Smuzhiyun }
4007*4882a593Smuzhiyun 
4008*4882a593Smuzhiyun static int kvmppc_run_vcpu(struct kvm_vcpu *vcpu)
4009*4882a593Smuzhiyun {
4010*4882a593Smuzhiyun 	struct kvm_run *run = vcpu->run;
4011*4882a593Smuzhiyun 	int n_ceded, i, r;
4012*4882a593Smuzhiyun 	struct kvmppc_vcore *vc;
4013*4882a593Smuzhiyun 	struct kvm_vcpu *v;
4014*4882a593Smuzhiyun 
4015*4882a593Smuzhiyun 	trace_kvmppc_run_vcpu_enter(vcpu);
4016*4882a593Smuzhiyun 
4017*4882a593Smuzhiyun 	run->exit_reason = 0;
4018*4882a593Smuzhiyun 	vcpu->arch.ret = RESUME_GUEST;
4019*4882a593Smuzhiyun 	vcpu->arch.trap = 0;
4020*4882a593Smuzhiyun 	kvmppc_update_vpas(vcpu);
4021*4882a593Smuzhiyun 
4022*4882a593Smuzhiyun 	/*
4023*4882a593Smuzhiyun 	 * Synchronize with other threads in this virtual core
4024*4882a593Smuzhiyun 	 */
4025*4882a593Smuzhiyun 	vc = vcpu->arch.vcore;
4026*4882a593Smuzhiyun 	spin_lock(&vc->lock);
4027*4882a593Smuzhiyun 	vcpu->arch.ceded = 0;
4028*4882a593Smuzhiyun 	vcpu->arch.run_task = current;
4029*4882a593Smuzhiyun 	vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
4030*4882a593Smuzhiyun 	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
4031*4882a593Smuzhiyun 	vcpu->arch.busy_preempt = TB_NIL;
4032*4882a593Smuzhiyun 	WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu);
4033*4882a593Smuzhiyun 	++vc->n_runnable;
4034*4882a593Smuzhiyun 
4035*4882a593Smuzhiyun 	/*
4036*4882a593Smuzhiyun 	 * This happens the first time this is called for a vcpu.
4037*4882a593Smuzhiyun 	 * If the vcore is already running, we may be able to start
4038*4882a593Smuzhiyun 	 * this thread straight away and have it join in.
4039*4882a593Smuzhiyun 	 */
4040*4882a593Smuzhiyun 	if (!signal_pending(current)) {
4041*4882a593Smuzhiyun 		if ((vc->vcore_state == VCORE_PIGGYBACK ||
4042*4882a593Smuzhiyun 		     vc->vcore_state == VCORE_RUNNING) &&
4043*4882a593Smuzhiyun 			   !VCORE_IS_EXITING(vc)) {
4044*4882a593Smuzhiyun 			kvmppc_create_dtl_entry(vcpu, vc);
4045*4882a593Smuzhiyun 			kvmppc_start_thread(vcpu, vc);
4046*4882a593Smuzhiyun 			trace_kvm_guest_enter(vcpu);
4047*4882a593Smuzhiyun 		} else if (vc->vcore_state == VCORE_SLEEPING) {
4048*4882a593Smuzhiyun 		        rcuwait_wake_up(&vc->wait);
4049*4882a593Smuzhiyun 		}
4050*4882a593Smuzhiyun 
4051*4882a593Smuzhiyun 	}
4052*4882a593Smuzhiyun 
4053*4882a593Smuzhiyun 	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
4054*4882a593Smuzhiyun 	       !signal_pending(current)) {
4055*4882a593Smuzhiyun 		/* See if the MMU is ready to go */
4056*4882a593Smuzhiyun 		if (!vcpu->kvm->arch.mmu_ready) {
4057*4882a593Smuzhiyun 			spin_unlock(&vc->lock);
4058*4882a593Smuzhiyun 			r = kvmhv_setup_mmu(vcpu);
4059*4882a593Smuzhiyun 			spin_lock(&vc->lock);
4060*4882a593Smuzhiyun 			if (r) {
4061*4882a593Smuzhiyun 				run->exit_reason = KVM_EXIT_FAIL_ENTRY;
4062*4882a593Smuzhiyun 				run->fail_entry.
4063*4882a593Smuzhiyun 					hardware_entry_failure_reason = 0;
4064*4882a593Smuzhiyun 				vcpu->arch.ret = r;
4065*4882a593Smuzhiyun 				break;
4066*4882a593Smuzhiyun 			}
4067*4882a593Smuzhiyun 		}
4068*4882a593Smuzhiyun 
4069*4882a593Smuzhiyun 		if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
4070*4882a593Smuzhiyun 			kvmppc_vcore_end_preempt(vc);
4071*4882a593Smuzhiyun 
4072*4882a593Smuzhiyun 		if (vc->vcore_state != VCORE_INACTIVE) {
4073*4882a593Smuzhiyun 			kvmppc_wait_for_exec(vc, vcpu, TASK_INTERRUPTIBLE);
4074*4882a593Smuzhiyun 			continue;
4075*4882a593Smuzhiyun 		}
4076*4882a593Smuzhiyun 		for_each_runnable_thread(i, v, vc) {
4077*4882a593Smuzhiyun 			kvmppc_core_prepare_to_enter(v);
4078*4882a593Smuzhiyun 			if (signal_pending(v->arch.run_task)) {
4079*4882a593Smuzhiyun 				kvmppc_remove_runnable(vc, v);
4080*4882a593Smuzhiyun 				v->stat.signal_exits++;
4081*4882a593Smuzhiyun 				v->run->exit_reason = KVM_EXIT_INTR;
4082*4882a593Smuzhiyun 				v->arch.ret = -EINTR;
4083*4882a593Smuzhiyun 				wake_up(&v->arch.cpu_run);
4084*4882a593Smuzhiyun 			}
4085*4882a593Smuzhiyun 		}
4086*4882a593Smuzhiyun 		if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
4087*4882a593Smuzhiyun 			break;
4088*4882a593Smuzhiyun 		n_ceded = 0;
4089*4882a593Smuzhiyun 		for_each_runnable_thread(i, v, vc) {
4090*4882a593Smuzhiyun 			if (!kvmppc_vcpu_woken(v))
4091*4882a593Smuzhiyun 				n_ceded += v->arch.ceded;
4092*4882a593Smuzhiyun 			else
4093*4882a593Smuzhiyun 				v->arch.ceded = 0;
4094*4882a593Smuzhiyun 		}
4095*4882a593Smuzhiyun 		vc->runner = vcpu;
4096*4882a593Smuzhiyun 		if (n_ceded == vc->n_runnable) {
4097*4882a593Smuzhiyun 			kvmppc_vcore_blocked(vc);
4098*4882a593Smuzhiyun 		} else if (need_resched()) {
4099*4882a593Smuzhiyun 			kvmppc_vcore_preempt(vc);
4100*4882a593Smuzhiyun 			/* Let something else run */
4101*4882a593Smuzhiyun 			cond_resched_lock(&vc->lock);
4102*4882a593Smuzhiyun 			if (vc->vcore_state == VCORE_PREEMPT)
4103*4882a593Smuzhiyun 				kvmppc_vcore_end_preempt(vc);
4104*4882a593Smuzhiyun 		} else {
4105*4882a593Smuzhiyun 			kvmppc_run_core(vc);
4106*4882a593Smuzhiyun 		}
4107*4882a593Smuzhiyun 		vc->runner = NULL;
4108*4882a593Smuzhiyun 	}
4109*4882a593Smuzhiyun 
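	/*
	 * Once the vcore is running or exiting we must wait for the exit
	 * to complete before touching the vcpu state, even if a signal is
	 * pending, hence TASK_UNINTERRUPTIBLE below.
	 */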
4110*4882a593Smuzhiyun 	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
4111*4882a593Smuzhiyun 	       (vc->vcore_state == VCORE_RUNNING ||
4112*4882a593Smuzhiyun 		vc->vcore_state == VCORE_EXITING ||
4113*4882a593Smuzhiyun 		vc->vcore_state == VCORE_PIGGYBACK))
4114*4882a593Smuzhiyun 		kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE);
4115*4882a593Smuzhiyun 
4116*4882a593Smuzhiyun 	if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
4117*4882a593Smuzhiyun 		kvmppc_vcore_end_preempt(vc);
4118*4882a593Smuzhiyun 
4119*4882a593Smuzhiyun 	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
4120*4882a593Smuzhiyun 		kvmppc_remove_runnable(vc, vcpu);
4121*4882a593Smuzhiyun 		vcpu->stat.signal_exits++;
4122*4882a593Smuzhiyun 		run->exit_reason = KVM_EXIT_INTR;
4123*4882a593Smuzhiyun 		vcpu->arch.ret = -EINTR;
4124*4882a593Smuzhiyun 	}
4125*4882a593Smuzhiyun 
4126*4882a593Smuzhiyun 	if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) {
4127*4882a593Smuzhiyun 		/* Wake up some vcpu to run the core */
4128*4882a593Smuzhiyun 		i = -1;
4129*4882a593Smuzhiyun 		v = next_runnable_thread(vc, &i);
4130*4882a593Smuzhiyun 		wake_up(&v->arch.cpu_run);
4131*4882a593Smuzhiyun 	}
4132*4882a593Smuzhiyun 
4133*4882a593Smuzhiyun 	trace_kvmppc_run_vcpu_exit(vcpu);
4134*4882a593Smuzhiyun 	spin_unlock(&vc->lock);
4135*4882a593Smuzhiyun 	return vcpu->arch.ret;
4136*4882a593Smuzhiyun }
4137*4882a593Smuzhiyun 
4138*4882a593Smuzhiyun int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
4139*4882a593Smuzhiyun 			  unsigned long lpcr)
4140*4882a593Smuzhiyun {
4141*4882a593Smuzhiyun 	struct kvm_run *run = vcpu->run;
4142*4882a593Smuzhiyun 	int trap, r, pcpu;
4143*4882a593Smuzhiyun 	int srcu_idx, lpid;
4144*4882a593Smuzhiyun 	struct kvmppc_vcore *vc;
4145*4882a593Smuzhiyun 	struct kvm *kvm = vcpu->kvm;
4146*4882a593Smuzhiyun 	struct kvm_nested_guest *nested = vcpu->arch.nested;
4147*4882a593Smuzhiyun 
4148*4882a593Smuzhiyun 	trace_kvmppc_run_vcpu_enter(vcpu);
4149*4882a593Smuzhiyun 
4150*4882a593Smuzhiyun 	run->exit_reason = 0;
4151*4882a593Smuzhiyun 	vcpu->arch.ret = RESUME_GUEST;
4152*4882a593Smuzhiyun 	vcpu->arch.trap = 0;
4153*4882a593Smuzhiyun 
4154*4882a593Smuzhiyun 	vc = vcpu->arch.vcore;
4155*4882a593Smuzhiyun 	vcpu->arch.ceded = 0;
4156*4882a593Smuzhiyun 	vcpu->arch.run_task = current;
4157*4882a593Smuzhiyun 	vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
4158*4882a593Smuzhiyun 	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
4159*4882a593Smuzhiyun 	vcpu->arch.busy_preempt = TB_NIL;
4160*4882a593Smuzhiyun 	vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;
4161*4882a593Smuzhiyun 	vc->runnable_threads[0] = vcpu;
4162*4882a593Smuzhiyun 	vc->n_runnable = 1;
4163*4882a593Smuzhiyun 	vc->runner = vcpu;
4164*4882a593Smuzhiyun 
4165*4882a593Smuzhiyun 	/* See if the MMU is ready to go */
4166*4882a593Smuzhiyun 	if (!kvm->arch.mmu_ready)
4167*4882a593Smuzhiyun 		kvmhv_setup_mmu(vcpu);
4168*4882a593Smuzhiyun 
4169*4882a593Smuzhiyun 	if (need_resched())
4170*4882a593Smuzhiyun 		cond_resched();
4171*4882a593Smuzhiyun 
4172*4882a593Smuzhiyun 	kvmppc_update_vpas(vcpu);
4173*4882a593Smuzhiyun 
4174*4882a593Smuzhiyun 	init_vcore_to_run(vc);
4175*4882a593Smuzhiyun 	vc->preempt_tb = TB_NIL;
4176*4882a593Smuzhiyun 
4177*4882a593Smuzhiyun 	preempt_disable();
4178*4882a593Smuzhiyun 	pcpu = smp_processor_id();
4179*4882a593Smuzhiyun 	vc->pcpu = pcpu;
4180*4882a593Smuzhiyun 	kvmppc_prepare_radix_vcpu(vcpu, pcpu);
4181*4882a593Smuzhiyun 
4182*4882a593Smuzhiyun 	local_irq_disable();
4183*4882a593Smuzhiyun 	hard_irq_disable();
4184*4882a593Smuzhiyun 	if (signal_pending(current))
4185*4882a593Smuzhiyun 		goto sigpend;
4186*4882a593Smuzhiyun 	if (lazy_irq_pending() || need_resched() || !kvm->arch.mmu_ready)
4187*4882a593Smuzhiyun 		goto out;
4188*4882a593Smuzhiyun 
4189*4882a593Smuzhiyun 	if (!nested) {
4190*4882a593Smuzhiyun 		kvmppc_core_prepare_to_enter(vcpu);
4191*4882a593Smuzhiyun 		if (vcpu->arch.doorbell_request) {
4192*4882a593Smuzhiyun 			vc->dpdes = 1;
4193*4882a593Smuzhiyun 			smp_wmb();
4194*4882a593Smuzhiyun 			vcpu->arch.doorbell_request = 0;
4195*4882a593Smuzhiyun 		}
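		/*
		 * LPCR[MER] raises a mediated external interrupt, so the
		 * pending external gets delivered as soon as the guest
		 * sets MSR[EE].
		 */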
4196*4882a593Smuzhiyun 		if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
4197*4882a593Smuzhiyun 			     &vcpu->arch.pending_exceptions))
4198*4882a593Smuzhiyun 			lpcr |= LPCR_MER;
4199*4882a593Smuzhiyun 	} else if (vcpu->arch.pending_exceptions ||
4200*4882a593Smuzhiyun 		   vcpu->arch.doorbell_request ||
4201*4882a593Smuzhiyun 		   xive_interrupt_pending(vcpu)) {
4202*4882a593Smuzhiyun 		vcpu->arch.ret = RESUME_HOST;
4203*4882a593Smuzhiyun 		goto out;
4204*4882a593Smuzhiyun 	}
4205*4882a593Smuzhiyun 
4206*4882a593Smuzhiyun 	kvmppc_clear_host_core(pcpu);
4207*4882a593Smuzhiyun 
4208*4882a593Smuzhiyun 	local_paca->kvm_hstate.tid = 0;
4209*4882a593Smuzhiyun 	local_paca->kvm_hstate.napping = 0;
4210*4882a593Smuzhiyun 	local_paca->kvm_hstate.kvm_split_mode = NULL;
4211*4882a593Smuzhiyun 	kvmppc_start_thread(vcpu, vc);
4212*4882a593Smuzhiyun 	kvmppc_create_dtl_entry(vcpu, vc);
4213*4882a593Smuzhiyun 	trace_kvm_guest_enter(vcpu);
4214*4882a593Smuzhiyun 
4215*4882a593Smuzhiyun 	vc->vcore_state = VCORE_RUNNING;
4216*4882a593Smuzhiyun 	trace_kvmppc_run_core(vc, 0);
4217*4882a593Smuzhiyun 
4218*4882a593Smuzhiyun 	if (cpu_has_feature(CPU_FTR_HVMODE)) {
4219*4882a593Smuzhiyun 		lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
4220*4882a593Smuzhiyun 		mtspr(SPRN_LPID, lpid);
4221*4882a593Smuzhiyun 		isync();
4222*4882a593Smuzhiyun 		kvmppc_check_need_tlb_flush(kvm, pcpu, nested);
4223*4882a593Smuzhiyun 	}
4224*4882a593Smuzhiyun 
4225*4882a593Smuzhiyun 	guest_enter_irqoff();
4226*4882a593Smuzhiyun 
4227*4882a593Smuzhiyun 	srcu_idx = srcu_read_lock(&kvm->srcu);
4228*4882a593Smuzhiyun 
4229*4882a593Smuzhiyun 	this_cpu_disable_ftrace();
4230*4882a593Smuzhiyun 
4231*4882a593Smuzhiyun 	/* Tell lockdep that we're about to enable interrupts */
4232*4882a593Smuzhiyun 	trace_hardirqs_on();
4233*4882a593Smuzhiyun 
4234*4882a593Smuzhiyun 	trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr);
4235*4882a593Smuzhiyun 	vcpu->arch.trap = trap;
4236*4882a593Smuzhiyun 
4237*4882a593Smuzhiyun 	trace_hardirqs_off();
4238*4882a593Smuzhiyun 
4239*4882a593Smuzhiyun 	this_cpu_enable_ftrace();
4240*4882a593Smuzhiyun 
4241*4882a593Smuzhiyun 	srcu_read_unlock(&kvm->srcu, srcu_idx);
4242*4882a593Smuzhiyun 
4243*4882a593Smuzhiyun 	if (cpu_has_feature(CPU_FTR_HVMODE)) {
4244*4882a593Smuzhiyun 		mtspr(SPRN_LPID, kvm->arch.host_lpid);
4245*4882a593Smuzhiyun 		isync();
4246*4882a593Smuzhiyun 	}
4247*4882a593Smuzhiyun 
4248*4882a593Smuzhiyun 	set_irq_happened(trap);
4249*4882a593Smuzhiyun 
4250*4882a593Smuzhiyun 	kvmppc_set_host_core(pcpu);
4251*4882a593Smuzhiyun 
4252*4882a593Smuzhiyun 	context_tracking_guest_exit();
4253*4882a593Smuzhiyun 	if (!vtime_accounting_enabled_this_cpu()) {
4254*4882a593Smuzhiyun 		local_irq_enable();
4255*4882a593Smuzhiyun 		/*
4256*4882a593Smuzhiyun 		 * Service IRQs here before vtime_account_guest_exit() so any
4257*4882a593Smuzhiyun 		 * ticks that occurred while running the guest are accounted to
4258*4882a593Smuzhiyun 		 * the guest. If vtime accounting is enabled, accounting uses
4259*4882a593Smuzhiyun 		 * TB rather than ticks, so it can be done without enabling
4260*4882a593Smuzhiyun 		 * interrupts here, which has the problem that it accounts
4261*4882a593Smuzhiyun 		 * interrupt processing overhead to the host.
4262*4882a593Smuzhiyun 		 */
4263*4882a593Smuzhiyun 		local_irq_disable();
4264*4882a593Smuzhiyun 	}
4265*4882a593Smuzhiyun 	vtime_account_guest_exit();
4266*4882a593Smuzhiyun 
4267*4882a593Smuzhiyun 	local_irq_enable();
4268*4882a593Smuzhiyun 
4269*4882a593Smuzhiyun 	cpumask_clear_cpu(pcpu, &kvm->arch.cpu_in_guest);
4270*4882a593Smuzhiyun 
4271*4882a593Smuzhiyun 	preempt_enable();
4272*4882a593Smuzhiyun 
4273*4882a593Smuzhiyun 	/*
4274*4882a593Smuzhiyun 	 * cancel pending decrementer exception if DEC is now positive, or if
4275*4882a593Smuzhiyun 	 * entering a nested guest in which case the decrementer is now owned
4276*4882a593Smuzhiyun 	 * by L2 and the L1 decrementer is provided in hdec_expires
4277*4882a593Smuzhiyun 	 */
4278*4882a593Smuzhiyun 	if (kvmppc_core_pending_dec(vcpu) &&
4279*4882a593Smuzhiyun 			((get_tb() < vcpu->arch.dec_expires) ||
4280*4882a593Smuzhiyun 			 (trap == BOOK3S_INTERRUPT_SYSCALL &&
4281*4882a593Smuzhiyun 			  kvmppc_get_gpr(vcpu, 3) == H_ENTER_NESTED)))
4282*4882a593Smuzhiyun 		kvmppc_core_dequeue_dec(vcpu);
4283*4882a593Smuzhiyun 
4284*4882a593Smuzhiyun 	trace_kvm_guest_exit(vcpu);
4285*4882a593Smuzhiyun 	r = RESUME_GUEST;
4286*4882a593Smuzhiyun 	if (trap) {
4287*4882a593Smuzhiyun 		if (!nested)
4288*4882a593Smuzhiyun 			r = kvmppc_handle_exit_hv(vcpu, current);
4289*4882a593Smuzhiyun 		else
4290*4882a593Smuzhiyun 			r = kvmppc_handle_nested_exit(vcpu);
4291*4882a593Smuzhiyun 	}
4292*4882a593Smuzhiyun 	vcpu->arch.ret = r;
4293*4882a593Smuzhiyun 
4294*4882a593Smuzhiyun 	if (is_kvmppc_resume_guest(r) && vcpu->arch.ceded &&
4295*4882a593Smuzhiyun 	    !kvmppc_vcpu_woken(vcpu)) {
4296*4882a593Smuzhiyun 		kvmppc_set_timer(vcpu);
4297*4882a593Smuzhiyun 		while (vcpu->arch.ceded && !kvmppc_vcpu_woken(vcpu)) {
4298*4882a593Smuzhiyun 			if (signal_pending(current)) {
4299*4882a593Smuzhiyun 				vcpu->stat.signal_exits++;
4300*4882a593Smuzhiyun 				run->exit_reason = KVM_EXIT_INTR;
4301*4882a593Smuzhiyun 				vcpu->arch.ret = -EINTR;
4302*4882a593Smuzhiyun 				break;
4303*4882a593Smuzhiyun 			}
4304*4882a593Smuzhiyun 			spin_lock(&vc->lock);
4305*4882a593Smuzhiyun 			kvmppc_vcore_blocked(vc);
4306*4882a593Smuzhiyun 			spin_unlock(&vc->lock);
4307*4882a593Smuzhiyun 		}
4308*4882a593Smuzhiyun 	}
4309*4882a593Smuzhiyun 	vcpu->arch.ceded = 0;
4310*4882a593Smuzhiyun 
4311*4882a593Smuzhiyun 	vc->vcore_state = VCORE_INACTIVE;
4312*4882a593Smuzhiyun 	trace_kvmppc_run_core(vc, 1);
4313*4882a593Smuzhiyun 
4314*4882a593Smuzhiyun  done:
4315*4882a593Smuzhiyun 	kvmppc_remove_runnable(vc, vcpu);
4316*4882a593Smuzhiyun 	trace_kvmppc_run_vcpu_exit(vcpu);
4317*4882a593Smuzhiyun 
4318*4882a593Smuzhiyun 	return vcpu->arch.ret;
4319*4882a593Smuzhiyun 
4320*4882a593Smuzhiyun  sigpend:
4321*4882a593Smuzhiyun 	vcpu->stat.signal_exits++;
4322*4882a593Smuzhiyun 	run->exit_reason = KVM_EXIT_INTR;
4323*4882a593Smuzhiyun 	vcpu->arch.ret = -EINTR;
4324*4882a593Smuzhiyun  out:
4325*4882a593Smuzhiyun 	local_irq_enable();
4326*4882a593Smuzhiyun 	preempt_enable();
4327*4882a593Smuzhiyun 	goto done;
4328*4882a593Smuzhiyun }
4329*4882a593Smuzhiyun 
4330*4882a593Smuzhiyun static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
4331*4882a593Smuzhiyun {
4332*4882a593Smuzhiyun 	struct kvm_run *run = vcpu->run;
4333*4882a593Smuzhiyun 	int r;
4334*4882a593Smuzhiyun 	int srcu_idx;
4335*4882a593Smuzhiyun 	unsigned long ebb_regs[3] = {};	/* shut up GCC */
4336*4882a593Smuzhiyun 	unsigned long user_tar = 0;
4337*4882a593Smuzhiyun 	unsigned int user_vrsave;
4338*4882a593Smuzhiyun 	struct kvm *kvm;
4339*4882a593Smuzhiyun 
4340*4882a593Smuzhiyun 	if (!vcpu->arch.sane) {
4341*4882a593Smuzhiyun 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
4342*4882a593Smuzhiyun 		return -EINVAL;
4343*4882a593Smuzhiyun 	}
4344*4882a593Smuzhiyun 
4345*4882a593Smuzhiyun 	/*
4346*4882a593Smuzhiyun 	 * Don't allow entry with a suspended transaction, because
4347*4882a593Smuzhiyun 	 * the guest entry/exit code will lose it.
4348*4882a593Smuzhiyun 	 * If the guest has TM enabled, save away their TM-related SPRs
4349*4882a593Smuzhiyun 	 * (they will get restored by the TM unavailable interrupt).
4350*4882a593Smuzhiyun 	 */
4351*4882a593Smuzhiyun #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
4352*4882a593Smuzhiyun 	if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
4353*4882a593Smuzhiyun 	    (current->thread.regs->msr & MSR_TM)) {
4354*4882a593Smuzhiyun 		if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
4355*4882a593Smuzhiyun 			run->exit_reason = KVM_EXIT_FAIL_ENTRY;
4356*4882a593Smuzhiyun 			run->fail_entry.hardware_entry_failure_reason = 0;
4357*4882a593Smuzhiyun 			return -EINVAL;
4358*4882a593Smuzhiyun 		}
4359*4882a593Smuzhiyun 		/* Enable TM so we can read the TM SPRs */
4360*4882a593Smuzhiyun 		mtmsr(mfmsr() | MSR_TM);
4361*4882a593Smuzhiyun 		current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
4362*4882a593Smuzhiyun 		current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
4363*4882a593Smuzhiyun 		current->thread.tm_texasr = mfspr(SPRN_TEXASR);
4364*4882a593Smuzhiyun 		current->thread.regs->msr &= ~MSR_TM;
4365*4882a593Smuzhiyun 	}
4366*4882a593Smuzhiyun #endif
4367*4882a593Smuzhiyun 
4368*4882a593Smuzhiyun 	/*
4369*4882a593Smuzhiyun 	 * Force online to 1 for the sake of old userspace which doesn't
4370*4882a593Smuzhiyun 	 * set it.
4371*4882a593Smuzhiyun 	 */
4372*4882a593Smuzhiyun 	if (!vcpu->arch.online) {
4373*4882a593Smuzhiyun 		atomic_inc(&vcpu->arch.vcore->online_count);
4374*4882a593Smuzhiyun 		vcpu->arch.online = 1;
4375*4882a593Smuzhiyun 	}
4376*4882a593Smuzhiyun 
4377*4882a593Smuzhiyun 	kvmppc_core_prepare_to_enter(vcpu);
4378*4882a593Smuzhiyun 
4379*4882a593Smuzhiyun 	/* No need to go into the guest when all we'll do is come back out */
4380*4882a593Smuzhiyun 	if (signal_pending(current)) {
4381*4882a593Smuzhiyun 		run->exit_reason = KVM_EXIT_INTR;
4382*4882a593Smuzhiyun 		return -EINTR;
4383*4882a593Smuzhiyun 	}
4384*4882a593Smuzhiyun 
4385*4882a593Smuzhiyun 	kvm = vcpu->kvm;
4386*4882a593Smuzhiyun 	atomic_inc(&kvm->arch.vcpus_running);
4387*4882a593Smuzhiyun 	/* Order vcpus_running vs. mmu_ready, see kvmppc_alloc_reset_hpt */
4388*4882a593Smuzhiyun 	smp_mb();
4389*4882a593Smuzhiyun 
4390*4882a593Smuzhiyun 	flush_all_to_thread(current);
4391*4882a593Smuzhiyun 
4392*4882a593Smuzhiyun 	/* Save userspace EBB and other register values */
4393*4882a593Smuzhiyun 	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
4394*4882a593Smuzhiyun 		ebb_regs[0] = mfspr(SPRN_EBBHR);
4395*4882a593Smuzhiyun 		ebb_regs[1] = mfspr(SPRN_EBBRR);
4396*4882a593Smuzhiyun 		ebb_regs[2] = mfspr(SPRN_BESCR);
4397*4882a593Smuzhiyun 		user_tar = mfspr(SPRN_TAR);
4398*4882a593Smuzhiyun 	}
4399*4882a593Smuzhiyun 	user_vrsave = mfspr(SPRN_VRSAVE);
4400*4882a593Smuzhiyun 
4401*4882a593Smuzhiyun 	vcpu->arch.waitp = &vcpu->arch.vcore->wait;
4402*4882a593Smuzhiyun 	vcpu->arch.pgdir = kvm->mm->pgd;
4403*4882a593Smuzhiyun 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
4404*4882a593Smuzhiyun 
4405*4882a593Smuzhiyun 	do {
4406*4882a593Smuzhiyun 		/*
4407*4882a593Smuzhiyun 		 * The early POWER9 chips that can't mix radix and HPT threads
4408*4882a593Smuzhiyun 		 * on the same core also need the workaround for the problem
4409*4882a593Smuzhiyun 		 * where the TLB would prefetch entries in the guest exit path
4410*4882a593Smuzhiyun 		 * for radix guests using the guest PIDR value and LPID 0.
4411*4882a593Smuzhiyun 		 * The workaround is in the old path (kvmppc_run_vcpu())
4412*4882a593Smuzhiyun 		 * but not the new path (kvmhv_run_single_vcpu()).
4413*4882a593Smuzhiyun 		 */
4414*4882a593Smuzhiyun 		if (kvm->arch.threads_indep && kvm_is_radix(kvm) &&
4415*4882a593Smuzhiyun 		    !no_mixing_hpt_and_radix)
4416*4882a593Smuzhiyun 			r = kvmhv_run_single_vcpu(vcpu, ~(u64)0,
4417*4882a593Smuzhiyun 						  vcpu->arch.vcore->lpcr);
4418*4882a593Smuzhiyun 		else
4419*4882a593Smuzhiyun 			r = kvmppc_run_vcpu(vcpu);
4420*4882a593Smuzhiyun 
4421*4882a593Smuzhiyun 		if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
4422*4882a593Smuzhiyun 		    !(vcpu->arch.shregs.msr & MSR_PR)) {
4423*4882a593Smuzhiyun 			trace_kvm_hcall_enter(vcpu);
4424*4882a593Smuzhiyun 			r = kvmppc_pseries_do_hcall(vcpu);
4425*4882a593Smuzhiyun 			trace_kvm_hcall_exit(vcpu, r);
4426*4882a593Smuzhiyun 			kvmppc_core_prepare_to_enter(vcpu);
4427*4882a593Smuzhiyun 		} else if (r == RESUME_PAGE_FAULT) {
4428*4882a593Smuzhiyun 			srcu_idx = srcu_read_lock(&kvm->srcu);
4429*4882a593Smuzhiyun 			r = kvmppc_book3s_hv_page_fault(vcpu,
4430*4882a593Smuzhiyun 				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
4431*4882a593Smuzhiyun 			srcu_read_unlock(&kvm->srcu, srcu_idx);
4432*4882a593Smuzhiyun 		} else if (r == RESUME_PASSTHROUGH) {
4433*4882a593Smuzhiyun 			if (WARN_ON(xics_on_xive()))
4434*4882a593Smuzhiyun 				r = H_SUCCESS;
4435*4882a593Smuzhiyun 			else
4436*4882a593Smuzhiyun 				r = kvmppc_xics_rm_complete(vcpu, 0);
4437*4882a593Smuzhiyun 		}
4438*4882a593Smuzhiyun 	} while (is_kvmppc_resume_guest(r));
4439*4882a593Smuzhiyun 
4440*4882a593Smuzhiyun 	/* Restore userspace EBB and other register values */
4441*4882a593Smuzhiyun 	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
4442*4882a593Smuzhiyun 		mtspr(SPRN_EBBHR, ebb_regs[0]);
4443*4882a593Smuzhiyun 		mtspr(SPRN_EBBRR, ebb_regs[1]);
4444*4882a593Smuzhiyun 		mtspr(SPRN_BESCR, ebb_regs[2]);
4445*4882a593Smuzhiyun 		mtspr(SPRN_TAR, user_tar);
4446*4882a593Smuzhiyun 		mtspr(SPRN_FSCR, current->thread.fscr);
4447*4882a593Smuzhiyun 	}
4448*4882a593Smuzhiyun 	mtspr(SPRN_VRSAVE, user_vrsave);
4449*4882a593Smuzhiyun 
4450*4882a593Smuzhiyun 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
4451*4882a593Smuzhiyun 	atomic_dec(&kvm->arch.vcpus_running);
4452*4882a593Smuzhiyun 	return r;
4453*4882a593Smuzhiyun }
4454*4882a593Smuzhiyun 
4455*4882a593Smuzhiyun static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
4456*4882a593Smuzhiyun 				     int shift, int sllp)
4457*4882a593Smuzhiyun {
4458*4882a593Smuzhiyun 	(*sps)->page_shift = shift;
4459*4882a593Smuzhiyun 	(*sps)->slb_enc = sllp;
4460*4882a593Smuzhiyun 	(*sps)->enc[0].page_shift = shift;
4461*4882a593Smuzhiyun 	(*sps)->enc[0].pte_enc = kvmppc_pgsize_lp_encoding(shift, shift);
4462*4882a593Smuzhiyun 	/*
4463*4882a593Smuzhiyun 	 * Add 16MB MPSS support (may get filtered out by userspace)
4464*4882a593Smuzhiyun 	 */
4465*4882a593Smuzhiyun 	if (shift != 24) {
4466*4882a593Smuzhiyun 		int penc = kvmppc_pgsize_lp_encoding(shift, 24);
4467*4882a593Smuzhiyun 		if (penc != -1) {
4468*4882a593Smuzhiyun 			(*sps)->enc[1].page_shift = 24;
4469*4882a593Smuzhiyun 			(*sps)->enc[1].pte_enc = penc;
4470*4882a593Smuzhiyun 		}
4471*4882a593Smuzhiyun 	}
4472*4882a593Smuzhiyun 	(*sps)++;
4473*4882a593Smuzhiyun }
4474*4882a593Smuzhiyun 
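/*
 * Handler for the KVM_PPC_GET_SMMU_INFO ioctl: report the storage-key,
 * segment-size and page-size capabilities of the HV MMU to userspace.
 */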
4475*4882a593Smuzhiyun static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm,
4476*4882a593Smuzhiyun 					 struct kvm_ppc_smmu_info *info)
4477*4882a593Smuzhiyun {
4478*4882a593Smuzhiyun 	struct kvm_ppc_one_seg_page_size *sps;
4479*4882a593Smuzhiyun 
4480*4882a593Smuzhiyun 	/*
4481*4882a593Smuzhiyun 	 * POWER7, POWER8 and POWER9 all support 32 storage keys for data.
4482*4882a593Smuzhiyun 	 * POWER7 doesn't support keys for instruction accesses,
4483*4882a593Smuzhiyun 	 * POWER7 doesn't support keys for instruction accesses;
4484*4882a593Smuzhiyun 	 */
4485*4882a593Smuzhiyun 	info->data_keys = 32;
4486*4882a593Smuzhiyun 	info->instr_keys = cpu_has_feature(CPU_FTR_ARCH_207S) ? 32 : 0;
4487*4882a593Smuzhiyun 
4488*4882a593Smuzhiyun 	/* POWER7, 8 and 9 all have 1T segments and 32-entry SLB */
4489*4882a593Smuzhiyun 	info->flags = KVM_PPC_PAGE_SIZES_REAL | KVM_PPC_1T_SEGMENTS;
4490*4882a593Smuzhiyun 	info->slb_size = 32;
4491*4882a593Smuzhiyun 
4492*4882a593Smuzhiyun 	/* We only support these sizes for now, and no multi-size segments */
4493*4882a593Smuzhiyun 	sps = &info->sps[0];
4494*4882a593Smuzhiyun 	kvmppc_add_seg_page_size(&sps, 12, 0);
4495*4882a593Smuzhiyun 	kvmppc_add_seg_page_size(&sps, 16, SLB_VSID_L | SLB_VSID_LP_01);
4496*4882a593Smuzhiyun 	kvmppc_add_seg_page_size(&sps, 24, SLB_VSID_L);
4497*4882a593Smuzhiyun 
4498*4882a593Smuzhiyun 	/* If running as a nested hypervisor, we don't support HPT guests */
4499*4882a593Smuzhiyun 	if (kvmhv_on_pseries())
4500*4882a593Smuzhiyun 		info->flags |= KVM_PPC_NO_HASH;
4501*4882a593Smuzhiyun 
4502*4882a593Smuzhiyun 	return 0;
4503*4882a593Smuzhiyun }
4504*4882a593Smuzhiyun 
4505*4882a593Smuzhiyun /*
4506*4882a593Smuzhiyun  * Get (and clear) the dirty memory log for a memory slot.
4507*4882a593Smuzhiyun  */
4508*4882a593Smuzhiyun static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
4509*4882a593Smuzhiyun 					 struct kvm_dirty_log *log)
4510*4882a593Smuzhiyun {
4511*4882a593Smuzhiyun 	struct kvm_memslots *slots;
4512*4882a593Smuzhiyun 	struct kvm_memory_slot *memslot;
4513*4882a593Smuzhiyun 	int i, r;
4514*4882a593Smuzhiyun 	unsigned long n;
4515*4882a593Smuzhiyun 	unsigned long *buf, *p;
4516*4882a593Smuzhiyun 	struct kvm_vcpu *vcpu;
4517*4882a593Smuzhiyun 
4518*4882a593Smuzhiyun 	mutex_lock(&kvm->slots_lock);
4519*4882a593Smuzhiyun 
4520*4882a593Smuzhiyun 	r = -EINVAL;
4521*4882a593Smuzhiyun 	if (log->slot >= KVM_USER_MEM_SLOTS)
4522*4882a593Smuzhiyun 		goto out;
4523*4882a593Smuzhiyun 
4524*4882a593Smuzhiyun 	slots = kvm_memslots(kvm);
4525*4882a593Smuzhiyun 	memslot = id_to_memslot(slots, log->slot);
4526*4882a593Smuzhiyun 	r = -ENOENT;
4527*4882a593Smuzhiyun 	if (!memslot || !memslot->dirty_bitmap)
4528*4882a593Smuzhiyun 		goto out;
4529*4882a593Smuzhiyun 
4530*4882a593Smuzhiyun 	/*
4531*4882a593Smuzhiyun 	 * Use second half of bitmap area because both HPT and radix
4532*4882a593Smuzhiyun 	 * accumulate bits in the first half.
4533*4882a593Smuzhiyun 	 */
4534*4882a593Smuzhiyun 	n = kvm_dirty_bitmap_bytes(memslot);
4535*4882a593Smuzhiyun 	buf = memslot->dirty_bitmap + n / sizeof(long);
4536*4882a593Smuzhiyun 	memset(buf, 0, n);
4537*4882a593Smuzhiyun 
4538*4882a593Smuzhiyun 	if (kvm_is_radix(kvm))
4539*4882a593Smuzhiyun 		r = kvmppc_hv_get_dirty_log_radix(kvm, memslot, buf);
4540*4882a593Smuzhiyun 	else
4541*4882a593Smuzhiyun 		r = kvmppc_hv_get_dirty_log_hpt(kvm, memslot, buf);
4542*4882a593Smuzhiyun 	if (r)
4543*4882a593Smuzhiyun 		goto out;
4544*4882a593Smuzhiyun 
4545*4882a593Smuzhiyun 	/*
4546*4882a593Smuzhiyun 	 * We accumulate dirty bits in the first half of the
4547*4882a593Smuzhiyun 	 * memslot's dirty_bitmap area, for when pages are paged
4548*4882a593Smuzhiyun 	 * out or modified by the host directly.  Pick up these
4549*4882a593Smuzhiyun 	 * bits and add them to the map.
4550*4882a593Smuzhiyun 	 */
4551*4882a593Smuzhiyun 	p = memslot->dirty_bitmap;
4552*4882a593Smuzhiyun 	for (i = 0; i < n / sizeof(long); ++i)
4553*4882a593Smuzhiyun 		buf[i] |= xchg(&p[i], 0);
4554*4882a593Smuzhiyun 
4555*4882a593Smuzhiyun 	/* Harvest dirty bits from VPA and DTL updates */
4556*4882a593Smuzhiyun 	/* Note: we never modify the SLB shadow buffer areas */
4557*4882a593Smuzhiyun 	kvm_for_each_vcpu(i, vcpu, kvm) {
4558*4882a593Smuzhiyun 		spin_lock(&vcpu->arch.vpa_update_lock);
4559*4882a593Smuzhiyun 		kvmppc_harvest_vpa_dirty(&vcpu->arch.vpa, memslot, buf);
4560*4882a593Smuzhiyun 		kvmppc_harvest_vpa_dirty(&vcpu->arch.dtl, memslot, buf);
4561*4882a593Smuzhiyun 		spin_unlock(&vcpu->arch.vpa_update_lock);
4562*4882a593Smuzhiyun 	}
4563*4882a593Smuzhiyun 
4564*4882a593Smuzhiyun 	r = -EFAULT;
4565*4882a593Smuzhiyun 	if (copy_to_user(log->dirty_bitmap, buf, n))
4566*4882a593Smuzhiyun 		goto out;
4567*4882a593Smuzhiyun 
4568*4882a593Smuzhiyun 	r = 0;
4569*4882a593Smuzhiyun out:
4570*4882a593Smuzhiyun 	mutex_unlock(&kvm->slots_lock);
4571*4882a593Smuzhiyun 	return r;
4572*4882a593Smuzhiyun }
4573*4882a593Smuzhiyun 
4574*4882a593Smuzhiyun static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *slot)
4575*4882a593Smuzhiyun {
4576*4882a593Smuzhiyun 	vfree(slot->arch.rmap);
4577*4882a593Smuzhiyun 	slot->arch.rmap = NULL;
4578*4882a593Smuzhiyun }
4579*4882a593Smuzhiyun 
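/*
 * Called before a memslot update is committed.  On slot creation,
 * allocate the reverse-map (rmap) array that the HPT code uses to find
 * the HPTEs for a guest page, after a sanity check of its size against
 * total RAM.
 */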
4580*4882a593Smuzhiyun static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
4581*4882a593Smuzhiyun 					struct kvm_memory_slot *slot,
4582*4882a593Smuzhiyun 					const struct kvm_userspace_memory_region *mem,
4583*4882a593Smuzhiyun 					enum kvm_mr_change change)
4584*4882a593Smuzhiyun {
4585*4882a593Smuzhiyun 	unsigned long npages = mem->memory_size >> PAGE_SHIFT;
4586*4882a593Smuzhiyun 
4587*4882a593Smuzhiyun 	if (change == KVM_MR_CREATE) {
4588*4882a593Smuzhiyun 		unsigned long size = array_size(npages, sizeof(*slot->arch.rmap));
4589*4882a593Smuzhiyun 
4590*4882a593Smuzhiyun 		if ((size >> PAGE_SHIFT) > totalram_pages())
4591*4882a593Smuzhiyun 			return -ENOMEM;
4592*4882a593Smuzhiyun 
4593*4882a593Smuzhiyun 		slot->arch.rmap = vzalloc(size);
4594*4882a593Smuzhiyun 		if (!slot->arch.rmap)
4595*4882a593Smuzhiyun 			return -ENOMEM;
4596*4882a593Smuzhiyun 	}
4597*4882a593Smuzhiyun 
4598*4882a593Smuzhiyun 	return 0;
4599*4882a593Smuzhiyun }
4600*4882a593Smuzhiyun 
4601*4882a593Smuzhiyun static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
4602*4882a593Smuzhiyun 				const struct kvm_userspace_memory_region *mem,
4603*4882a593Smuzhiyun 				const struct kvm_memory_slot *old,
4604*4882a593Smuzhiyun 				const struct kvm_memory_slot *new,
4605*4882a593Smuzhiyun 				enum kvm_mr_change change)
4606*4882a593Smuzhiyun {
4607*4882a593Smuzhiyun 	unsigned long npages = mem->memory_size >> PAGE_SHIFT;
4608*4882a593Smuzhiyun 
4609*4882a593Smuzhiyun 	/*
4610*4882a593Smuzhiyun 	 * If we are creating a new memslot, an address that was
4611*4882a593Smuzhiyun 	 * previously cached as emulated MMIO may no longer be
4612*4882a593Smuzhiyun 	 * emulated MMIO, so invalidate
4613*4882a593Smuzhiyun 	 * all the caches of emulated MMIO translations.
4614*4882a593Smuzhiyun 	 */
4615*4882a593Smuzhiyun 	if (npages)
4616*4882a593Smuzhiyun 		atomic64_inc(&kvm->arch.mmio_update);
4617*4882a593Smuzhiyun 
4618*4882a593Smuzhiyun 	/*
4619*4882a593Smuzhiyun 	 * For change == KVM_MR_MOVE or KVM_MR_DELETE, higher levels
4620*4882a593Smuzhiyun 	 * have already called kvm_arch_flush_shadow_memslot() to
4621*4882a593Smuzhiyun 	 * flush shadow mappings.  For KVM_MR_CREATE we have no
4622*4882a593Smuzhiyun 	 * previous mappings.  So the only case to handle is
4623*4882a593Smuzhiyun 	 * KVM_MR_FLAGS_ONLY when the KVM_MEM_LOG_DIRTY_PAGES bit
4624*4882a593Smuzhiyun 	 * has been changed.
4625*4882a593Smuzhiyun 	 * For radix guests, we flush on setting KVM_MEM_LOG_DIRTY_PAGES
4626*4882a593Smuzhiyun 	 * to get rid of any THP PTEs in the partition-scoped page tables
4627*4882a593Smuzhiyun 	 * so we can track dirtiness at the page level; we flush when
4628*4882a593Smuzhiyun 	 * clearing KVM_MEM_LOG_DIRTY_PAGES so that we can go back to
4629*4882a593Smuzhiyun 	 * using THP PTEs.
4630*4882a593Smuzhiyun 	 */
4631*4882a593Smuzhiyun 	if (change == KVM_MR_FLAGS_ONLY && kvm_is_radix(kvm) &&
4632*4882a593Smuzhiyun 	    ((new->flags ^ old->flags) & KVM_MEM_LOG_DIRTY_PAGES))
4633*4882a593Smuzhiyun 		kvmppc_radix_flush_memslot(kvm, old);
4634*4882a593Smuzhiyun 	/*
4635*4882a593Smuzhiyun 	 * If UV hasn't yet called H_SVM_INIT_START, don't register memslots.
4636*4882a593Smuzhiyun 	 */
4637*4882a593Smuzhiyun 	if (!kvm->arch.secure_guest)
4638*4882a593Smuzhiyun 		return;
4639*4882a593Smuzhiyun 
4640*4882a593Smuzhiyun 	switch (change) {
4641*4882a593Smuzhiyun 	case KVM_MR_CREATE:
4642*4882a593Smuzhiyun 		/*
4643*4882a593Smuzhiyun 		 * @TODO kvmppc_uvmem_memslot_create() can fail and
4644*4882a593Smuzhiyun 		 * return error. Fix this.
4645*4882a593Smuzhiyun 		 */
4646*4882a593Smuzhiyun 		kvmppc_uvmem_memslot_create(kvm, new);
4647*4882a593Smuzhiyun 		break;
4648*4882a593Smuzhiyun 	case KVM_MR_DELETE:
4649*4882a593Smuzhiyun 		kvmppc_uvmem_memslot_delete(kvm, old);
4650*4882a593Smuzhiyun 		break;
4651*4882a593Smuzhiyun 	default:
4652*4882a593Smuzhiyun 		/* TODO: Handle KVM_MR_MOVE */
4653*4882a593Smuzhiyun 		break;
4654*4882a593Smuzhiyun 	}
4655*4882a593Smuzhiyun }
4656*4882a593Smuzhiyun 
4657*4882a593Smuzhiyun /*
4658*4882a593Smuzhiyun  * Update LPCR values in kvm->arch and in vcores.
4659*4882a593Smuzhiyun  * Caller must hold kvm->arch.mmu_setup_lock (for mutual exclusion
4660*4882a593Smuzhiyun  * of kvm->arch.lpcr update).
4661*4882a593Smuzhiyun  */
4662*4882a593Smuzhiyun void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
4663*4882a593Smuzhiyun {
4664*4882a593Smuzhiyun 	long int i;
4665*4882a593Smuzhiyun 	u32 cores_done = 0;
4666*4882a593Smuzhiyun 
4667*4882a593Smuzhiyun 	if ((kvm->arch.lpcr & mask) == lpcr)
4668*4882a593Smuzhiyun 		return;
4669*4882a593Smuzhiyun 
4670*4882a593Smuzhiyun 	kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr;
4671*4882a593Smuzhiyun 
4672*4882a593Smuzhiyun 	for (i = 0; i < KVM_MAX_VCORES; ++i) {
4673*4882a593Smuzhiyun 		struct kvmppc_vcore *vc = kvm->arch.vcores[i];
4674*4882a593Smuzhiyun 		if (!vc)
4675*4882a593Smuzhiyun 			continue;
4676*4882a593Smuzhiyun 		spin_lock(&vc->lock);
4677*4882a593Smuzhiyun 		vc->lpcr = (vc->lpcr & ~mask) | lpcr;
4678*4882a593Smuzhiyun 		spin_unlock(&vc->lock);
4679*4882a593Smuzhiyun 		if (++cores_done >= kvm->arch.online_vcores)
4680*4882a593Smuzhiyun 			break;
4681*4882a593Smuzhiyun 	}
4682*4882a593Smuzhiyun }
4683*4882a593Smuzhiyun 
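/*
 * Write this guest's partition table entry: dw0 describes the
 * translation structure (HPT location/size for hash guests, radix tree
 * root and size for radix guests), dw1 carries the process table
 * address supplied by userspace.
 */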
4684*4882a593Smuzhiyun void kvmppc_setup_partition_table(struct kvm *kvm)
4685*4882a593Smuzhiyun {
4686*4882a593Smuzhiyun 	unsigned long dw0, dw1;
4687*4882a593Smuzhiyun 
4688*4882a593Smuzhiyun 	if (!kvm_is_radix(kvm)) {
4689*4882a593Smuzhiyun 		/* PS field - page size for VRMA */
4690*4882a593Smuzhiyun 		dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) |
4691*4882a593Smuzhiyun 			((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1);
4692*4882a593Smuzhiyun 		/* HTABSIZE and HTABORG fields */
4693*4882a593Smuzhiyun 		dw0 |= kvm->arch.sdr1;
4694*4882a593Smuzhiyun 
4695*4882a593Smuzhiyun 		/* Second dword as set by userspace */
4696*4882a593Smuzhiyun 		dw1 = kvm->arch.process_table;
4697*4882a593Smuzhiyun 	} else {
4698*4882a593Smuzhiyun 		dw0 = PATB_HR | radix__get_tree_size() |
4699*4882a593Smuzhiyun 			__pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE;
4700*4882a593Smuzhiyun 		dw1 = PATB_GR | kvm->arch.process_table;
4701*4882a593Smuzhiyun 	}
4702*4882a593Smuzhiyun 	kvmhv_set_ptbl_entry(kvm->arch.lpid, dw0, dw1);
4703*4882a593Smuzhiyun }
4704*4882a593Smuzhiyun 
4705*4882a593Smuzhiyun /*
4706*4882a593Smuzhiyun  * Set up HPT (hashed page table) and RMA (real-mode area).
4707*4882a593Smuzhiyun  * Must be called with kvm->arch.mmu_setup_lock held.
4708*4882a593Smuzhiyun  */
4709*4882a593Smuzhiyun static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
4710*4882a593Smuzhiyun {
4711*4882a593Smuzhiyun 	int err = 0;
4712*4882a593Smuzhiyun 	struct kvm *kvm = vcpu->kvm;
4713*4882a593Smuzhiyun 	unsigned long hva;
4714*4882a593Smuzhiyun 	struct kvm_memory_slot *memslot;
4715*4882a593Smuzhiyun 	struct vm_area_struct *vma;
4716*4882a593Smuzhiyun 	unsigned long lpcr = 0, senc;
4717*4882a593Smuzhiyun 	unsigned long psize, porder;
4718*4882a593Smuzhiyun 	int srcu_idx;
4719*4882a593Smuzhiyun 
4720*4882a593Smuzhiyun 	/* Allocate hashed page table (if not done already) and reset it */
4721*4882a593Smuzhiyun 	if (!kvm->arch.hpt.virt) {
4722*4882a593Smuzhiyun 		int order = KVM_DEFAULT_HPT_ORDER;
4723*4882a593Smuzhiyun 		struct kvm_hpt_info info;
4724*4882a593Smuzhiyun 
4725*4882a593Smuzhiyun 		err = kvmppc_allocate_hpt(&info, order);
4726*4882a593Smuzhiyun 		/* If we get here, it means userspace didn't specify a
4727*4882a593Smuzhiyun 		 * size explicitly.  So, try successively smaller
4728*4882a593Smuzhiyun 		 * sizes if the default failed. */
4729*4882a593Smuzhiyun 		while ((err == -ENOMEM) && --order >= PPC_MIN_HPT_ORDER)
4730*4882a593Smuzhiyun 			err = kvmppc_allocate_hpt(&info, order);
4731*4882a593Smuzhiyun 
4732*4882a593Smuzhiyun 		if (err < 0) {
4733*4882a593Smuzhiyun 			pr_err("KVM: Couldn't alloc HPT\n");
4734*4882a593Smuzhiyun 			goto out;
4735*4882a593Smuzhiyun 		}
4736*4882a593Smuzhiyun 
4737*4882a593Smuzhiyun 		kvmppc_set_hpt(kvm, &info);
4738*4882a593Smuzhiyun 	}
4739*4882a593Smuzhiyun 
4740*4882a593Smuzhiyun 	/* Look up the memslot for guest physical address 0 */
4741*4882a593Smuzhiyun 	srcu_idx = srcu_read_lock(&kvm->srcu);
4742*4882a593Smuzhiyun 	memslot = gfn_to_memslot(kvm, 0);
4743*4882a593Smuzhiyun 
4744*4882a593Smuzhiyun 	/* We must have some memory at 0 by now */
4745*4882a593Smuzhiyun 	err = -EINVAL;
4746*4882a593Smuzhiyun 	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
4747*4882a593Smuzhiyun 		goto out_srcu;
4748*4882a593Smuzhiyun 
4749*4882a593Smuzhiyun 	/* Look up the VMA for the start of this memory slot */
4750*4882a593Smuzhiyun 	hva = memslot->userspace_addr;
4751*4882a593Smuzhiyun 	mmap_read_lock(kvm->mm);
4752*4882a593Smuzhiyun 	vma = find_vma(kvm->mm, hva);
4753*4882a593Smuzhiyun 	if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
4754*4882a593Smuzhiyun 		goto up_out;
4755*4882a593Smuzhiyun 
4756*4882a593Smuzhiyun 	psize = vma_kernel_pagesize(vma);
4757*4882a593Smuzhiyun 
4758*4882a593Smuzhiyun 	mmap_read_unlock(kvm->mm);
4759*4882a593Smuzhiyun 
4760*4882a593Smuzhiyun 	/* We can handle 4k, 64k or 16M pages in the VRMA */
4761*4882a593Smuzhiyun 	if (psize >= 0x1000000)
4762*4882a593Smuzhiyun 		psize = 0x1000000;
4763*4882a593Smuzhiyun 	else if (psize >= 0x10000)
4764*4882a593Smuzhiyun 		psize = 0x10000;
4765*4882a593Smuzhiyun 	else
4766*4882a593Smuzhiyun 		psize = 0x1000;
4767*4882a593Smuzhiyun 	porder = __ilog2(psize);
4768*4882a593Smuzhiyun 
4769*4882a593Smuzhiyun 	senc = slb_pgsize_encoding(psize);
4770*4882a593Smuzhiyun 	kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
4771*4882a593Smuzhiyun 		(VRMA_VSID << SLB_VSID_SHIFT_1T);
4772*4882a593Smuzhiyun 	/* Create HPTEs in the hash page table for the VRMA */
4773*4882a593Smuzhiyun 	kvmppc_map_vrma(vcpu, memslot, porder);
4774*4882a593Smuzhiyun 
4775*4882a593Smuzhiyun 	/* Update VRMASD field in the LPCR */
4776*4882a593Smuzhiyun 	if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
4777*4882a593Smuzhiyun 		/* the -4 is to account for senc values starting at 0x10 */
4778*4882a593Smuzhiyun 		lpcr = senc << (LPCR_VRMASD_SH - 4);
4779*4882a593Smuzhiyun 		kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
4780*4882a593Smuzhiyun 	}
4781*4882a593Smuzhiyun 
4782*4882a593Smuzhiyun 	/* Order updates to kvm->arch.lpcr etc. vs. mmu_ready */
4783*4882a593Smuzhiyun 	smp_wmb();
4784*4882a593Smuzhiyun 	err = 0;
4785*4882a593Smuzhiyun  out_srcu:
4786*4882a593Smuzhiyun 	srcu_read_unlock(&kvm->srcu, srcu_idx);
4787*4882a593Smuzhiyun  out:
4788*4882a593Smuzhiyun 	return err;
4789*4882a593Smuzhiyun 
4790*4882a593Smuzhiyun  up_out:
4791*4882a593Smuzhiyun 	mmap_read_unlock(kvm->mm);
4792*4882a593Smuzhiyun 	goto out_srcu;
4793*4882a593Smuzhiyun }
4794*4882a593Smuzhiyun 
4795*4882a593Smuzhiyun /*
4796*4882a593Smuzhiyun  * Must be called with kvm->arch.mmu_setup_lock held and
4797*4882a593Smuzhiyun  * mmu_ready = 0 and no vcpus running.
4798*4882a593Smuzhiyun  */
4799*4882a593Smuzhiyun int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
4800*4882a593Smuzhiyun {
4801*4882a593Smuzhiyun 	if (nesting_enabled(kvm))
4802*4882a593Smuzhiyun 		kvmhv_release_all_nested(kvm);
4803*4882a593Smuzhiyun 	kvmppc_rmap_reset(kvm);
4804*4882a593Smuzhiyun 	kvm->arch.process_table = 0;
4805*4882a593Smuzhiyun 	/* Mutual exclusion with kvm_unmap_hva_range etc. */
4806*4882a593Smuzhiyun 	spin_lock(&kvm->mmu_lock);
4807*4882a593Smuzhiyun 	kvm->arch.radix = 0;
4808*4882a593Smuzhiyun 	spin_unlock(&kvm->mmu_lock);
4809*4882a593Smuzhiyun 	kvmppc_free_radix(kvm);
4810*4882a593Smuzhiyun 	kvmppc_update_lpcr(kvm, LPCR_VPM1,
4811*4882a593Smuzhiyun 			   LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
4812*4882a593Smuzhiyun 	return 0;
4813*4882a593Smuzhiyun }
4814*4882a593Smuzhiyun 
4815*4882a593Smuzhiyun /*
4816*4882a593Smuzhiyun  * Must be called with kvm->arch.mmu_setup_lock held and
4817*4882a593Smuzhiyun  * mmu_ready = 0 and no vcpus running.
4818*4882a593Smuzhiyun  */
4819*4882a593Smuzhiyun int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
4820*4882a593Smuzhiyun {
4821*4882a593Smuzhiyun 	int err;
4822*4882a593Smuzhiyun 
4823*4882a593Smuzhiyun 	err = kvmppc_init_vm_radix(kvm);
4824*4882a593Smuzhiyun 	if (err)
4825*4882a593Smuzhiyun 		return err;
4826*4882a593Smuzhiyun 	kvmppc_rmap_reset(kvm);
4827*4882a593Smuzhiyun 	/* Mutual exclusion with kvm_unmap_hva_range etc. */
4828*4882a593Smuzhiyun 	spin_lock(&kvm->mmu_lock);
4829*4882a593Smuzhiyun 	kvm->arch.radix = 1;
4830*4882a593Smuzhiyun 	spin_unlock(&kvm->mmu_lock);
4831*4882a593Smuzhiyun 	kvmppc_free_hpt(&kvm->arch.hpt);
4832*4882a593Smuzhiyun 	kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR,
4833*4882a593Smuzhiyun 			   LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
4834*4882a593Smuzhiyun 	return 0;
4835*4882a593Smuzhiyun }
4836*4882a593Smuzhiyun 
4837*4882a593Smuzhiyun #ifdef CONFIG_KVM_XICS
4838*4882a593Smuzhiyun /*
4839*4882a593Smuzhiyun  * Allocate a per-core structure for managing state about which cores are
4840*4882a593Smuzhiyun  * running in the host versus the guest and for exchanging data between
4841*4882a593Smuzhiyun  * real mode KVM and CPU running in the host.
4842*4882a593Smuzhiyun  * This is only done for the first VM.
4843*4882a593Smuzhiyun  * The allocated structure stays even if all VMs have stopped.
4844*4882a593Smuzhiyun  * It is only freed when the kvm-hv module is unloaded.
4845*4882a593Smuzhiyun  * It's OK for this routine to fail, we just don't support host
4846*4882a593Smuzhiyun  * It's OK for this routine to fail; we just don't support host
4847*4882a593Smuzhiyun  */
4848*4882a593Smuzhiyun void kvmppc_alloc_host_rm_ops(void)
4849*4882a593Smuzhiyun {
4850*4882a593Smuzhiyun 	struct kvmppc_host_rm_ops *ops;
4851*4882a593Smuzhiyun 	unsigned long l_ops;
4852*4882a593Smuzhiyun 	int cpu, core;
4853*4882a593Smuzhiyun 	int size;
4854*4882a593Smuzhiyun 
4855*4882a593Smuzhiyun 	/* Not the first time here? */
4856*4882a593Smuzhiyun 	if (kvmppc_host_rm_ops_hv != NULL)
4857*4882a593Smuzhiyun 		return;
4858*4882a593Smuzhiyun 
4859*4882a593Smuzhiyun 	ops = kzalloc(sizeof(struct kvmppc_host_rm_ops), GFP_KERNEL);
4860*4882a593Smuzhiyun 	if (!ops)
4861*4882a593Smuzhiyun 		return;
4862*4882a593Smuzhiyun 
4863*4882a593Smuzhiyun 	size = cpu_nr_cores() * sizeof(struct kvmppc_host_rm_core);
4864*4882a593Smuzhiyun 	ops->rm_core = kzalloc(size, GFP_KERNEL);
4865*4882a593Smuzhiyun 
4866*4882a593Smuzhiyun 	if (!ops->rm_core) {
4867*4882a593Smuzhiyun 		kfree(ops);
4868*4882a593Smuzhiyun 		return;
4869*4882a593Smuzhiyun 	}
4870*4882a593Smuzhiyun 
4871*4882a593Smuzhiyun 	cpus_read_lock();
4872*4882a593Smuzhiyun 
4873*4882a593Smuzhiyun 	for (cpu = 0; cpu < nr_cpu_ids; cpu += threads_per_core) {
4874*4882a593Smuzhiyun 		if (!cpu_online(cpu))
4875*4882a593Smuzhiyun 			continue;
4876*4882a593Smuzhiyun 
4877*4882a593Smuzhiyun 		core = cpu >> threads_shift;
4878*4882a593Smuzhiyun 		ops->rm_core[core].rm_state.in_host = 1;
4879*4882a593Smuzhiyun 	}
4880*4882a593Smuzhiyun 
4881*4882a593Smuzhiyun 	ops->vcpu_kick = kvmppc_fast_vcpu_kick_hv;
4882*4882a593Smuzhiyun 
4883*4882a593Smuzhiyun 	/*
4884*4882a593Smuzhiyun 	 * Make the contents of the kvmppc_host_rm_ops structure visible
4885*4882a593Smuzhiyun 	 * to other CPUs before we assign it to the global variable.
4886*4882a593Smuzhiyun 	 * Do an atomic assignment (no locks used here), but if someone
4887*4882a593Smuzhiyun 	 * beats us to it, just free our copy and return.
4888*4882a593Smuzhiyun 	 */
4889*4882a593Smuzhiyun 	smp_wmb();
4890*4882a593Smuzhiyun 	l_ops = (unsigned long) ops;
4891*4882a593Smuzhiyun 
4892*4882a593Smuzhiyun 	if (cmpxchg64((unsigned long *)&kvmppc_host_rm_ops_hv, 0, l_ops)) {
4893*4882a593Smuzhiyun 		cpus_read_unlock();
4894*4882a593Smuzhiyun 		kfree(ops->rm_core);
4895*4882a593Smuzhiyun 		kfree(ops);
4896*4882a593Smuzhiyun 		return;
4897*4882a593Smuzhiyun 	}
4898*4882a593Smuzhiyun 
4899*4882a593Smuzhiyun 	cpuhp_setup_state_nocalls_cpuslocked(CPUHP_KVM_PPC_BOOK3S_PREPARE,
4900*4882a593Smuzhiyun 					     "ppc/kvm_book3s:prepare",
4901*4882a593Smuzhiyun 					     kvmppc_set_host_core,
4902*4882a593Smuzhiyun 					     kvmppc_clear_host_core);
4903*4882a593Smuzhiyun 	cpus_read_unlock();
4904*4882a593Smuzhiyun }
4905*4882a593Smuzhiyun 
4906*4882a593Smuzhiyun void kvmppc_free_host_rm_ops(void)
4907*4882a593Smuzhiyun {
4908*4882a593Smuzhiyun 	if (kvmppc_host_rm_ops_hv) {
4909*4882a593Smuzhiyun 		cpuhp_remove_state_nocalls(CPUHP_KVM_PPC_BOOK3S_PREPARE);
4910*4882a593Smuzhiyun 		kfree(kvmppc_host_rm_ops_hv->rm_core);
4911*4882a593Smuzhiyun 		kfree(kvmppc_host_rm_ops_hv);
4912*4882a593Smuzhiyun 		kvmppc_host_rm_ops_hv = NULL;
4913*4882a593Smuzhiyun 	}
4914*4882a593Smuzhiyun }
4915*4882a593Smuzhiyun #endif
4916*4882a593Smuzhiyun 
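/*
 * VM-scope initialization: allocate an LPID, compute the initial LPCR
 * value, start the guest in radix mode if the host is radix, and
 * create the per-VM debugfs directory.
 */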
4917*4882a593Smuzhiyun static int kvmppc_core_init_vm_hv(struct kvm *kvm)
4918*4882a593Smuzhiyun {
4919*4882a593Smuzhiyun 	unsigned long lpcr, lpid;
4920*4882a593Smuzhiyun 	char buf[32];
4921*4882a593Smuzhiyun 	int ret;
4922*4882a593Smuzhiyun 
4923*4882a593Smuzhiyun 	mutex_init(&kvm->arch.uvmem_lock);
4924*4882a593Smuzhiyun 	INIT_LIST_HEAD(&kvm->arch.uvmem_pfns);
4925*4882a593Smuzhiyun 	mutex_init(&kvm->arch.mmu_setup_lock);
4926*4882a593Smuzhiyun 
4927*4882a593Smuzhiyun 	/* Allocate the guest's logical partition ID */
4928*4882a593Smuzhiyun 
4929*4882a593Smuzhiyun 	lpid = kvmppc_alloc_lpid();
4930*4882a593Smuzhiyun 	if ((long)lpid < 0)
4931*4882a593Smuzhiyun 		return -ENOMEM;
4932*4882a593Smuzhiyun 	kvm->arch.lpid = lpid;
4933*4882a593Smuzhiyun 
4934*4882a593Smuzhiyun 	kvmppc_alloc_host_rm_ops();
4935*4882a593Smuzhiyun 
4936*4882a593Smuzhiyun 	kvmhv_vm_nested_init(kvm);
4937*4882a593Smuzhiyun 
4938*4882a593Smuzhiyun 	/*
4939*4882a593Smuzhiyun 	 * Since we don't flush the TLB when tearing down a VM,
4940*4882a593Smuzhiyun 	 * and this lpid might have previously been used,
4941*4882a593Smuzhiyun 	 * make sure we flush on each core before running the new VM.
4942*4882a593Smuzhiyun 	 * On POWER9, the tlbie in mmu_partition_table_set_entry()
4943*4882a593Smuzhiyun 	 * does this flush for us.
4944*4882a593Smuzhiyun 	 */
4945*4882a593Smuzhiyun 	if (!cpu_has_feature(CPU_FTR_ARCH_300))
4946*4882a593Smuzhiyun 		cpumask_setall(&kvm->arch.need_tlb_flush);
4947*4882a593Smuzhiyun 
4948*4882a593Smuzhiyun 	/* Start out with the default set of hcalls enabled */
4949*4882a593Smuzhiyun 	memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
4950*4882a593Smuzhiyun 	       sizeof(kvm->arch.enabled_hcalls));
4951*4882a593Smuzhiyun 
4952*4882a593Smuzhiyun 	if (!cpu_has_feature(CPU_FTR_ARCH_300))
4953*4882a593Smuzhiyun 		kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
4954*4882a593Smuzhiyun 
4955*4882a593Smuzhiyun 	/* Init LPCR for virtual RMA mode */
4956*4882a593Smuzhiyun 	if (cpu_has_feature(CPU_FTR_HVMODE)) {
4957*4882a593Smuzhiyun 		kvm->arch.host_lpid = mfspr(SPRN_LPID);
4958*4882a593Smuzhiyun 		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
4959*4882a593Smuzhiyun 		lpcr &= LPCR_PECE | LPCR_LPES;
4960*4882a593Smuzhiyun 	} else {
4961*4882a593Smuzhiyun 		lpcr = 0;
4962*4882a593Smuzhiyun 	}
4963*4882a593Smuzhiyun 	lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
4964*4882a593Smuzhiyun 		LPCR_VPM0 | LPCR_VPM1;
4965*4882a593Smuzhiyun 	kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
4966*4882a593Smuzhiyun 		(VRMA_VSID << SLB_VSID_SHIFT_1T);
4967*4882a593Smuzhiyun 	/* On POWER8 turn on online bit to enable PURR/SPURR */
4968*4882a593Smuzhiyun 	if (cpu_has_feature(CPU_FTR_ARCH_207S))
4969*4882a593Smuzhiyun 		lpcr |= LPCR_ONL;
4970*4882a593Smuzhiyun 	/*
4971*4882a593Smuzhiyun 	 * On POWER9, VPM0 bit is reserved (VPM0=1 behaviour is assumed)
4972*4882a593Smuzhiyun 	 * Set HVICE bit to enable hypervisor virtualization interrupts.
4973*4882a593Smuzhiyun 	 * Set HEIC to prevent OS interrupts to go to hypervisor (should
4974*4882a593Smuzhiyun 	 * Set HEIC to prevent OS interrupts from going to the hypervisor (should
4975*4882a593Smuzhiyun 	 * EE in HV mode with this LPCR still set)
4976*4882a593Smuzhiyun 	 */
4977*4882a593Smuzhiyun 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
4978*4882a593Smuzhiyun 		lpcr &= ~LPCR_VPM0;
4979*4882a593Smuzhiyun 		lpcr |= LPCR_HVICE | LPCR_HEIC;
4980*4882a593Smuzhiyun 
4981*4882a593Smuzhiyun 		/*
4982*4882a593Smuzhiyun 		 * If xive is enabled, we route 0x500 interrupts directly
4983*4882a593Smuzhiyun 		 * to the guest.
4984*4882a593Smuzhiyun 		 */
4985*4882a593Smuzhiyun 		if (xics_on_xive())
4986*4882a593Smuzhiyun 			lpcr |= LPCR_LPES;
4987*4882a593Smuzhiyun 	}
4988*4882a593Smuzhiyun 
4989*4882a593Smuzhiyun 	/*
4990*4882a593Smuzhiyun 	 * If the host uses radix, the guest starts out as radix.
4991*4882a593Smuzhiyun 	 */
4992*4882a593Smuzhiyun 	if (radix_enabled()) {
4993*4882a593Smuzhiyun 		kvm->arch.radix = 1;
4994*4882a593Smuzhiyun 		kvm->arch.mmu_ready = 1;
4995*4882a593Smuzhiyun 		lpcr &= ~LPCR_VPM1;
4996*4882a593Smuzhiyun 		lpcr |= LPCR_UPRT | LPCR_GTSE | LPCR_HR;
4997*4882a593Smuzhiyun 		ret = kvmppc_init_vm_radix(kvm);
4998*4882a593Smuzhiyun 		if (ret) {
4999*4882a593Smuzhiyun 			kvmppc_free_lpid(kvm->arch.lpid);
5000*4882a593Smuzhiyun 			return ret;
5001*4882a593Smuzhiyun 		}
5002*4882a593Smuzhiyun 		kvmppc_setup_partition_table(kvm);
5003*4882a593Smuzhiyun 	}
5004*4882a593Smuzhiyun 
5005*4882a593Smuzhiyun 	kvm->arch.lpcr = lpcr;
5006*4882a593Smuzhiyun 
5007*4882a593Smuzhiyun 	/* Initialization for future HPT resizes */
5008*4882a593Smuzhiyun 	kvm->arch.resize_hpt = NULL;
5009*4882a593Smuzhiyun 
5010*4882a593Smuzhiyun 	/*
5011*4882a593Smuzhiyun 	 * Work out how many sets the TLB has, for the use of
5012*4882a593Smuzhiyun 	 * the TLB invalidation loop in book3s_hv_rmhandlers.S.
5013*4882a593Smuzhiyun 	 */
5014*4882a593Smuzhiyun 	if (radix_enabled())
5015*4882a593Smuzhiyun 		kvm->arch.tlb_sets = POWER9_TLB_SETS_RADIX;	/* 128 */
5016*4882a593Smuzhiyun 	else if (cpu_has_feature(CPU_FTR_ARCH_300))
5017*4882a593Smuzhiyun 		kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH;	/* 256 */
5018*4882a593Smuzhiyun 	else if (cpu_has_feature(CPU_FTR_ARCH_207S))
5019*4882a593Smuzhiyun 		kvm->arch.tlb_sets = POWER8_TLB_SETS;		/* 512 */
5020*4882a593Smuzhiyun 	else
5021*4882a593Smuzhiyun 		kvm->arch.tlb_sets = POWER7_TLB_SETS;		/* 128 */
5022*4882a593Smuzhiyun 
5023*4882a593Smuzhiyun 	/*
5024*4882a593Smuzhiyun 	 * Track that we now have a HV mode VM active. This blocks secondary
5025*4882a593Smuzhiyun 	 * CPU threads from coming online.
5026*4882a593Smuzhiyun 	 * On POWER9, we only need to do this if the "indep_threads_mode"
5027*4882a593Smuzhiyun 	 * module parameter has been set to N.
5028*4882a593Smuzhiyun 	 */
5029*4882a593Smuzhiyun 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
5030*4882a593Smuzhiyun 		if (!indep_threads_mode && !cpu_has_feature(CPU_FTR_HVMODE)) {
5031*4882a593Smuzhiyun 			pr_warn("KVM: Ignoring indep_threads_mode=N in nested hypervisor\n");
5032*4882a593Smuzhiyun 			kvm->arch.threads_indep = true;
5033*4882a593Smuzhiyun 		} else {
5034*4882a593Smuzhiyun 			kvm->arch.threads_indep = indep_threads_mode;
5035*4882a593Smuzhiyun 		}
5036*4882a593Smuzhiyun 	}
5037*4882a593Smuzhiyun 	if (!kvm->arch.threads_indep)
5038*4882a593Smuzhiyun 		kvm_hv_vm_activated();
5039*4882a593Smuzhiyun 
5040*4882a593Smuzhiyun 	/*
5041*4882a593Smuzhiyun 	 * Initialize smt_mode depending on processor.
5042*4882a593Smuzhiyun 	 * POWER8 and earlier have to use "strict" threading, where
5043*4882a593Smuzhiyun 	 * all vCPUs in a vcore have to run on the same (sub)core,
5044*4882a593Smuzhiyun 	 * whereas on POWER9 the threads can each run a different
5045*4882a593Smuzhiyun 	 * guest.
5046*4882a593Smuzhiyun 	 */
5047*4882a593Smuzhiyun 	if (!cpu_has_feature(CPU_FTR_ARCH_300))
5048*4882a593Smuzhiyun 		kvm->arch.smt_mode = threads_per_subcore;
5049*4882a593Smuzhiyun 	else
5050*4882a593Smuzhiyun 		kvm->arch.smt_mode = 1;
5051*4882a593Smuzhiyun 	kvm->arch.emul_smt_mode = 1;
5052*4882a593Smuzhiyun 
5053*4882a593Smuzhiyun 	/*
5054*4882a593Smuzhiyun 	 * Create a debugfs directory for the VM
5055*4882a593Smuzhiyun 	 */
5056*4882a593Smuzhiyun 	snprintf(buf, sizeof(buf), "vm%d", current->pid);
5057*4882a593Smuzhiyun 	kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir);
5058*4882a593Smuzhiyun 	kvmppc_mmu_debugfs_init(kvm);
5059*4882a593Smuzhiyun 	if (radix_enabled())
5060*4882a593Smuzhiyun 		kvmhv_radix_debugfs_init(kvm);
5061*4882a593Smuzhiyun 
5062*4882a593Smuzhiyun 	return 0;
5063*4882a593Smuzhiyun }
5064*4882a593Smuzhiyun 
5065*4882a593Smuzhiyun static void kvmppc_free_vcores(struct kvm *kvm)
5066*4882a593Smuzhiyun {
5067*4882a593Smuzhiyun 	long int i;
5068*4882a593Smuzhiyun 
5069*4882a593Smuzhiyun 	for (i = 0; i < KVM_MAX_VCORES; ++i)
5070*4882a593Smuzhiyun 		kfree(kvm->arch.vcores[i]);
5071*4882a593Smuzhiyun 	kvm->arch.online_vcores = 0;
5072*4882a593Smuzhiyun }
5073*4882a593Smuzhiyun 
5074*4882a593Smuzhiyun static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
5075*4882a593Smuzhiyun {
5076*4882a593Smuzhiyun 	debugfs_remove_recursive(kvm->arch.debugfs_dir);
5077*4882a593Smuzhiyun 
5078*4882a593Smuzhiyun 	if (!kvm->arch.threads_indep)
5079*4882a593Smuzhiyun 		kvm_hv_vm_deactivated();
5080*4882a593Smuzhiyun 
5081*4882a593Smuzhiyun 	kvmppc_free_vcores(kvm);
5082*4882a593Smuzhiyun 
5083*4882a593Smuzhiyun 
5085*4882a593Smuzhiyun 		kvmppc_free_radix(kvm);
5086*4882a593Smuzhiyun 	else
5087*4882a593Smuzhiyun 		kvmppc_free_hpt(&kvm->arch.hpt);
5088*4882a593Smuzhiyun 
5089*4882a593Smuzhiyun 	/* Perform global invalidation and return lpid to the pool */
5090*4882a593Smuzhiyun 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
5091*4882a593Smuzhiyun 		if (nesting_enabled(kvm))
5092*4882a593Smuzhiyun 			kvmhv_release_all_nested(kvm);
5093*4882a593Smuzhiyun 		kvm->arch.process_table = 0;
5094*4882a593Smuzhiyun 		if (kvm->arch.secure_guest)
5095*4882a593Smuzhiyun 			uv_svm_terminate(kvm->arch.lpid);
5096*4882a593Smuzhiyun 		kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
5097*4882a593Smuzhiyun 	}
5098*4882a593Smuzhiyun 
5099*4882a593Smuzhiyun 	kvmppc_free_lpid(kvm->arch.lpid);
5100*4882a593Smuzhiyun 
5101*4882a593Smuzhiyun 	kvmppc_free_pimap(kvm);
5102*4882a593Smuzhiyun }
5103*4882a593Smuzhiyun 
5104*4882a593Smuzhiyun /* We don't need to emulate any privileged instructions or dcbz */
5105*4882a593Smuzhiyun static int kvmppc_core_emulate_op_hv(struct kvm_vcpu *vcpu,
5106*4882a593Smuzhiyun 				     unsigned int inst, int *advance)
5107*4882a593Smuzhiyun {
5108*4882a593Smuzhiyun 	return EMULATE_FAIL;
5109*4882a593Smuzhiyun }
5110*4882a593Smuzhiyun 
5111*4882a593Smuzhiyun static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn,
5112*4882a593Smuzhiyun 					ulong spr_val)
5113*4882a593Smuzhiyun {
5114*4882a593Smuzhiyun 	return EMULATE_FAIL;
5115*4882a593Smuzhiyun }
5116*4882a593Smuzhiyun 
5117*4882a593Smuzhiyun static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
5118*4882a593Smuzhiyun 					ulong *spr_val)
5119*4882a593Smuzhiyun {
5120*4882a593Smuzhiyun 	return EMULATE_FAIL;
5121*4882a593Smuzhiyun }
5122*4882a593Smuzhiyun 
5123*4882a593Smuzhiyun static int kvmppc_core_check_processor_compat_hv(void)
5124*4882a593Smuzhiyun {
5125*4882a593Smuzhiyun 	if (cpu_has_feature(CPU_FTR_HVMODE) &&
5126*4882a593Smuzhiyun 	    cpu_has_feature(CPU_FTR_ARCH_206))
5127*4882a593Smuzhiyun 		return 0;
5128*4882a593Smuzhiyun 
5129*4882a593Smuzhiyun 	/* POWER9 in radix mode is capable of being a nested hypervisor. */
5130*4882a593Smuzhiyun 	if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled())
5131*4882a593Smuzhiyun 		return 0;
5132*4882a593Smuzhiyun 
5133*4882a593Smuzhiyun 	return -EIO;
5134*4882a593Smuzhiyun }
5135*4882a593Smuzhiyun 
5136*4882a593Smuzhiyun #ifdef CONFIG_KVM_XICS
5137*4882a593Smuzhiyun 
5138*4882a593Smuzhiyun void kvmppc_free_pimap(struct kvm *kvm)
5139*4882a593Smuzhiyun {
5140*4882a593Smuzhiyun 	kfree(kvm->arch.pimap);
5141*4882a593Smuzhiyun }
5142*4882a593Smuzhiyun 
5143*4882a593Smuzhiyun static struct kvmppc_passthru_irqmap *kvmppc_alloc_pimap(void)
5144*4882a593Smuzhiyun {
5145*4882a593Smuzhiyun 	return kzalloc(sizeof(struct kvmppc_passthru_irqmap), GFP_KERNEL);
5146*4882a593Smuzhiyun }
5147*4882a593Smuzhiyun 
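/*
 * Map a host interrupt onto a guest interrupt source so EOIs can be
 * handled without exiting to the host: record the mapping in the
 * per-VM pimap and tell the XICS/XIVE layer about it.
 */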
5148*4882a593Smuzhiyun static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
5149*4882a593Smuzhiyun {
5150*4882a593Smuzhiyun 	struct irq_desc *desc;
5151*4882a593Smuzhiyun 	struct kvmppc_irq_map *irq_map;
5152*4882a593Smuzhiyun 	struct kvmppc_passthru_irqmap *pimap;
5153*4882a593Smuzhiyun 	struct irq_chip *chip;
5154*4882a593Smuzhiyun 	int i, rc = 0;
5155*4882a593Smuzhiyun 
5156*4882a593Smuzhiyun 	if (!kvm_irq_bypass)
5157*4882a593Smuzhiyun 		return 1;
5158*4882a593Smuzhiyun 
5159*4882a593Smuzhiyun 	desc = irq_to_desc(host_irq);
5160*4882a593Smuzhiyun 	if (!desc)
5161*4882a593Smuzhiyun 		return -EIO;
5162*4882a593Smuzhiyun 
5163*4882a593Smuzhiyun 	mutex_lock(&kvm->lock);
5164*4882a593Smuzhiyun 
5165*4882a593Smuzhiyun 	pimap = kvm->arch.pimap;
5166*4882a593Smuzhiyun 	if (pimap == NULL) {
5167*4882a593Smuzhiyun 		/* First call, allocate structure to hold IRQ map */
5168*4882a593Smuzhiyun 		pimap = kvmppc_alloc_pimap();
5169*4882a593Smuzhiyun 		if (pimap == NULL) {
5170*4882a593Smuzhiyun 			mutex_unlock(&kvm->lock);
5171*4882a593Smuzhiyun 			return -ENOMEM;
5172*4882a593Smuzhiyun 		}
5173*4882a593Smuzhiyun 		kvm->arch.pimap = pimap;
5174*4882a593Smuzhiyun 	}
5175*4882a593Smuzhiyun 
5176*4882a593Smuzhiyun 	/*
5177*4882a593Smuzhiyun 	 * For now, we only support interrupts for which the EOI operation
5178*4882a593Smuzhiyun 	 * is an OPAL call followed by a write to XIRR, since that's
5179*4882a593Smuzhiyun 	 * what our real-mode EOI code does, or a XIVE interrupt
5180*4882a593Smuzhiyun 	 */
5181*4882a593Smuzhiyun 	chip = irq_data_get_irq_chip(&desc->irq_data);
5182*4882a593Smuzhiyun 	if (!chip || !(is_pnv_opal_msi(chip) || is_xive_irq(chip))) {
5183*4882a593Smuzhiyun 		pr_warn("kvmppc_set_passthru_irq_hv: Could not assign IRQ map for (%d,%d)\n",
5184*4882a593Smuzhiyun 			host_irq, guest_gsi);
5185*4882a593Smuzhiyun 		mutex_unlock(&kvm->lock);
5186*4882a593Smuzhiyun 		return -ENOENT;
5187*4882a593Smuzhiyun 	}
5188*4882a593Smuzhiyun 
5189*4882a593Smuzhiyun 	/*
5190*4882a593Smuzhiyun 	 * See if we already have an entry for this guest IRQ number.
5191*4882a593Smuzhiyun 	 * If it's mapped to a hardware IRQ number, that's an error;
5192*4882a593Smuzhiyun 	 * otherwise re-use this entry.
5193*4882a593Smuzhiyun 	 */
5194*4882a593Smuzhiyun 	for (i = 0; i < pimap->n_mapped; i++) {
5195*4882a593Smuzhiyun 		if (guest_gsi == pimap->mapped[i].v_hwirq) {
5196*4882a593Smuzhiyun 			if (pimap->mapped[i].r_hwirq) {
5197*4882a593Smuzhiyun 				mutex_unlock(&kvm->lock);
5198*4882a593Smuzhiyun 				return -EINVAL;
5199*4882a593Smuzhiyun 			}
5200*4882a593Smuzhiyun 			break;
5201*4882a593Smuzhiyun 		}
5202*4882a593Smuzhiyun 	}
5203*4882a593Smuzhiyun 
5204*4882a593Smuzhiyun 	if (i == KVMPPC_PIRQ_MAPPED) {
5205*4882a593Smuzhiyun 		mutex_unlock(&kvm->lock);
5206*4882a593Smuzhiyun 		return -EAGAIN;		/* table is full */
5207*4882a593Smuzhiyun 	}
5208*4882a593Smuzhiyun 
5209*4882a593Smuzhiyun 	irq_map = &pimap->mapped[i];
5210*4882a593Smuzhiyun 
5211*4882a593Smuzhiyun 	irq_map->v_hwirq = guest_gsi;
5212*4882a593Smuzhiyun 	irq_map->desc = desc;
5213*4882a593Smuzhiyun 
5214*4882a593Smuzhiyun 	/*
5215*4882a593Smuzhiyun 	 * Order the above two stores before the next to serialize with
5216*4882a593Smuzhiyun 	 * the KVM real mode handler.
5217*4882a593Smuzhiyun 	 */
5218*4882a593Smuzhiyun 	smp_wmb();
5219*4882a593Smuzhiyun 	irq_map->r_hwirq = desc->irq_data.hwirq;
5220*4882a593Smuzhiyun 
5221*4882a593Smuzhiyun 	if (i == pimap->n_mapped)
5222*4882a593Smuzhiyun 		pimap->n_mapped++;
5223*4882a593Smuzhiyun 
5224*4882a593Smuzhiyun 	if (xics_on_xive())
5225*4882a593Smuzhiyun 		rc = kvmppc_xive_set_mapped(kvm, guest_gsi, desc);
5226*4882a593Smuzhiyun 	else
5227*4882a593Smuzhiyun 		kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq);
5228*4882a593Smuzhiyun 	if (rc)
5229*4882a593Smuzhiyun 		irq_map->r_hwirq = 0;
5230*4882a593Smuzhiyun 
5231*4882a593Smuzhiyun 	mutex_unlock(&kvm->lock);
5232*4882a593Smuzhiyun 
5233*4882a593Smuzhiyun 	return 0;
5234*4882a593Smuzhiyun }
5235*4882a593Smuzhiyun 
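/*
 * Undo a passthrough mapping: tell the XICS/XIVE layer and invalidate
 * the pimap entry.  The pimap itself is only freed at VM destruction.
 */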
5236*4882a593Smuzhiyun static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
5237*4882a593Smuzhiyun {
5238*4882a593Smuzhiyun 	struct irq_desc *desc;
5239*4882a593Smuzhiyun 	struct kvmppc_passthru_irqmap *pimap;
5240*4882a593Smuzhiyun 	int i, rc = 0;
5241*4882a593Smuzhiyun 
5242*4882a593Smuzhiyun 	if (!kvm_irq_bypass)
5243*4882a593Smuzhiyun 		return 0;
5244*4882a593Smuzhiyun 
5245*4882a593Smuzhiyun 	desc = irq_to_desc(host_irq);
5246*4882a593Smuzhiyun 	if (!desc)
5247*4882a593Smuzhiyun 		return -EIO;
5248*4882a593Smuzhiyun 
5249*4882a593Smuzhiyun 	mutex_lock(&kvm->lock);
5250*4882a593Smuzhiyun 	if (!kvm->arch.pimap)
5251*4882a593Smuzhiyun 		goto unlock;
5252*4882a593Smuzhiyun 
5253*4882a593Smuzhiyun 	pimap = kvm->arch.pimap;
5254*4882a593Smuzhiyun 
5255*4882a593Smuzhiyun 	for (i = 0; i < pimap->n_mapped; i++) {
5256*4882a593Smuzhiyun 		if (guest_gsi == pimap->mapped[i].v_hwirq)
5257*4882a593Smuzhiyun 			break;
5258*4882a593Smuzhiyun 	}
5259*4882a593Smuzhiyun 
5260*4882a593Smuzhiyun 	if (i == pimap->n_mapped) {
5261*4882a593Smuzhiyun 		mutex_unlock(&kvm->lock);
5262*4882a593Smuzhiyun 		return -ENODEV;
5263*4882a593Smuzhiyun 	}
5264*4882a593Smuzhiyun 
5265*4882a593Smuzhiyun 	if (xics_on_xive())
5266*4882a593Smuzhiyun 		rc = kvmppc_xive_clr_mapped(kvm, guest_gsi, pimap->mapped[i].desc);
5267*4882a593Smuzhiyun 	else
5268*4882a593Smuzhiyun 		kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq);
5269*4882a593Smuzhiyun 
5270*4882a593Smuzhiyun 	/* invalidate the entry (what to do on error from the above?) */
5271*4882a593Smuzhiyun 	pimap->mapped[i].r_hwirq = 0;
5272*4882a593Smuzhiyun 
5273*4882a593Smuzhiyun 	/*
5274*4882a593Smuzhiyun 	 * We don't free this structure even when the count goes to
5275*4882a593Smuzhiyun 	 * zero. The structure is freed when we destroy the VM.
5276*4882a593Smuzhiyun 	 */
5277*4882a593Smuzhiyun  unlock:
5278*4882a593Smuzhiyun 	mutex_unlock(&kvm->lock);
5279*4882a593Smuzhiyun 	return rc;
5280*4882a593Smuzhiyun }
5281*4882a593Smuzhiyun 
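/*
 * irqbypass consumer callbacks: establish the passthrough mapping from
 * an irqfd's producer (host IRQ) to its GSI when the producer appears,
 * and tear it down again when the producer goes away.
 */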
5282*4882a593Smuzhiyun static int kvmppc_irq_bypass_add_producer_hv(struct irq_bypass_consumer *cons,
5283*4882a593Smuzhiyun 					     struct irq_bypass_producer *prod)
5284*4882a593Smuzhiyun {
5285*4882a593Smuzhiyun 	int ret = 0;
5286*4882a593Smuzhiyun 	struct kvm_kernel_irqfd *irqfd =
5287*4882a593Smuzhiyun 		container_of(cons, struct kvm_kernel_irqfd, consumer);
5288*4882a593Smuzhiyun 
5289*4882a593Smuzhiyun 	irqfd->producer = prod;
5290*4882a593Smuzhiyun 
5291*4882a593Smuzhiyun 	ret = kvmppc_set_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi);
5292*4882a593Smuzhiyun 	if (ret)
5293*4882a593Smuzhiyun 		pr_info("kvmppc_set_passthru_irq (irq %d, gsi %d) fails: %d\n",
5294*4882a593Smuzhiyun 			prod->irq, irqfd->gsi, ret);
5295*4882a593Smuzhiyun 
5296*4882a593Smuzhiyun 	return ret;
5297*4882a593Smuzhiyun }
5298*4882a593Smuzhiyun 
5299*4882a593Smuzhiyun static void kvmppc_irq_bypass_del_producer_hv(struct irq_bypass_consumer *cons,
5300*4882a593Smuzhiyun 					      struct irq_bypass_producer *prod)
5301*4882a593Smuzhiyun {
5302*4882a593Smuzhiyun 	int ret;
5303*4882a593Smuzhiyun 	struct kvm_kernel_irqfd *irqfd =
5304*4882a593Smuzhiyun 		container_of(cons, struct kvm_kernel_irqfd, consumer);
5305*4882a593Smuzhiyun 
5306*4882a593Smuzhiyun 	irqfd->producer = NULL;
5307*4882a593Smuzhiyun 
5308*4882a593Smuzhiyun 	/*
5309*4882a593Smuzhiyun 	 * When the producer of a consumer is unregistered, we change back
5310*4882a593Smuzhiyun 	 * to the default external interrupt handling mode - KVM real mode
5311*4882a593Smuzhiyun 	 * will switch back to the host.
5312*4882a593Smuzhiyun 	 */
5313*4882a593Smuzhiyun 	ret = kvmppc_clr_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi);
5314*4882a593Smuzhiyun 	if (ret)
5315*4882a593Smuzhiyun 		pr_warn("kvmppc_clr_passthru_irq (irq %d, gsi %d) fails: %d\n",
5316*4882a593Smuzhiyun 			prod->irq, irqfd->gsi, ret);
5317*4882a593Smuzhiyun }
5318*4882a593Smuzhiyun #endif
5319*4882a593Smuzhiyun 
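/*
 * HV-specific VM ioctls: HPT allocation, the HTAB fd used for
 * migration, and the two-phase (prepare/commit) HPT resize interface.
 */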
5320*4882a593Smuzhiyun static long kvm_arch_vm_ioctl_hv(struct file *filp,
5321*4882a593Smuzhiyun 				 unsigned int ioctl, unsigned long arg)
5322*4882a593Smuzhiyun {
5323*4882a593Smuzhiyun 	struct kvm *kvm __maybe_unused = filp->private_data;
5324*4882a593Smuzhiyun 	void __user *argp = (void __user *)arg;
5325*4882a593Smuzhiyun 	long r;
5326*4882a593Smuzhiyun 
5327*4882a593Smuzhiyun 	switch (ioctl) {
5328*4882a593Smuzhiyun 
5329*4882a593Smuzhiyun 	case KVM_PPC_ALLOCATE_HTAB: {
5330*4882a593Smuzhiyun 		u32 htab_order;
5331*4882a593Smuzhiyun 
5332*4882a593Smuzhiyun 		/* If we're a nested hypervisor, we currently only support radix */
5333*4882a593Smuzhiyun 		if (kvmhv_on_pseries()) {
5334*4882a593Smuzhiyun 			r = -EOPNOTSUPP;
5335*4882a593Smuzhiyun 			break;
5336*4882a593Smuzhiyun 		}
5337*4882a593Smuzhiyun 
5338*4882a593Smuzhiyun 		r = -EFAULT;
5339*4882a593Smuzhiyun 		if (get_user(htab_order, (u32 __user *)argp))
5340*4882a593Smuzhiyun 			break;
5341*4882a593Smuzhiyun 		r = kvmppc_alloc_reset_hpt(kvm, htab_order);
5342*4882a593Smuzhiyun 		if (r)
5343*4882a593Smuzhiyun 			break;
5344*4882a593Smuzhiyun 		r = 0;
5345*4882a593Smuzhiyun 		break;
5346*4882a593Smuzhiyun 	}
5347*4882a593Smuzhiyun 
5348*4882a593Smuzhiyun 	case KVM_PPC_GET_HTAB_FD: {
5349*4882a593Smuzhiyun 		struct kvm_get_htab_fd ghf;
5350*4882a593Smuzhiyun 
5351*4882a593Smuzhiyun 		r = -EFAULT;
5352*4882a593Smuzhiyun 		if (copy_from_user(&ghf, argp, sizeof(ghf)))
5353*4882a593Smuzhiyun 			break;
5354*4882a593Smuzhiyun 		r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
5355*4882a593Smuzhiyun 		break;
5356*4882a593Smuzhiyun 	}
5357*4882a593Smuzhiyun 
5358*4882a593Smuzhiyun 	case KVM_PPC_RESIZE_HPT_PREPARE: {
5359*4882a593Smuzhiyun 		struct kvm_ppc_resize_hpt rhpt;
5360*4882a593Smuzhiyun 
5361*4882a593Smuzhiyun 		r = -EFAULT;
5362*4882a593Smuzhiyun 		if (copy_from_user(&rhpt, argp, sizeof(rhpt)))
5363*4882a593Smuzhiyun 			break;
5364*4882a593Smuzhiyun 
5365*4882a593Smuzhiyun 		r = kvm_vm_ioctl_resize_hpt_prepare(kvm, &rhpt);
5366*4882a593Smuzhiyun 		break;
5367*4882a593Smuzhiyun 	}
5368*4882a593Smuzhiyun 
5369*4882a593Smuzhiyun 	case KVM_PPC_RESIZE_HPT_COMMIT: {
5370*4882a593Smuzhiyun 		struct kvm_ppc_resize_hpt rhpt;
5371*4882a593Smuzhiyun 
5372*4882a593Smuzhiyun 		r = -EFAULT;
5373*4882a593Smuzhiyun 		if (copy_from_user(&rhpt, argp, sizeof(rhpt)))
5374*4882a593Smuzhiyun 			break;
5375*4882a593Smuzhiyun 
5376*4882a593Smuzhiyun 		r = kvm_vm_ioctl_resize_hpt_commit(kvm, &rhpt);
5377*4882a593Smuzhiyun 		break;
5378*4882a593Smuzhiyun 	}
5379*4882a593Smuzhiyun 
5380*4882a593Smuzhiyun 	default:
5381*4882a593Smuzhiyun 		r = -ENOTTY;
5382*4882a593Smuzhiyun 	}
5383*4882a593Smuzhiyun 
5384*4882a593Smuzhiyun 	return r;
5385*4882a593Smuzhiyun }
5386*4882a593Smuzhiyun 
5387*4882a593Smuzhiyun /*
5388*4882a593Smuzhiyun  * List of hcall numbers to enable by default.
5389*4882a593Smuzhiyun  * For compatibility with old userspace, we enable by default
5390*4882a593Smuzhiyun  * all hcalls that were implemented before the hcall-enabling
5391*4882a593Smuzhiyun  * facility was added.  Note this list should not include H_RTAS.
5392*4882a593Smuzhiyun  */
5393*4882a593Smuzhiyun static unsigned int default_hcall_list[] = {
5394*4882a593Smuzhiyun 	H_REMOVE,
5395*4882a593Smuzhiyun 	H_ENTER,
5396*4882a593Smuzhiyun 	H_READ,
5397*4882a593Smuzhiyun 	H_PROTECT,
5398*4882a593Smuzhiyun 	H_BULK_REMOVE,
5399*4882a593Smuzhiyun 	H_GET_TCE,
5400*4882a593Smuzhiyun 	H_PUT_TCE,
5401*4882a593Smuzhiyun 	H_SET_DABR,
5402*4882a593Smuzhiyun 	H_SET_XDABR,
5403*4882a593Smuzhiyun 	H_CEDE,
5404*4882a593Smuzhiyun 	H_PROD,
5405*4882a593Smuzhiyun 	H_CONFER,
5406*4882a593Smuzhiyun 	H_REGISTER_VPA,
5407*4882a593Smuzhiyun #ifdef CONFIG_KVM_XICS
5408*4882a593Smuzhiyun 	H_EOI,
5409*4882a593Smuzhiyun 	H_CPPR,
5410*4882a593Smuzhiyun 	H_IPI,
5411*4882a593Smuzhiyun 	H_IPOLL,
5412*4882a593Smuzhiyun 	H_XIRR,
5413*4882a593Smuzhiyun 	H_XIRR_X,
5414*4882a593Smuzhiyun #endif
5415*4882a593Smuzhiyun 	0
5416*4882a593Smuzhiyun };
5417*4882a593Smuzhiyun 
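/*
 * Populate the default-enabled hcall bitmap.  hcall numbers are
 * multiples of 4, hence the hcall / 4 bit index.
 */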
5418*4882a593Smuzhiyun static void init_default_hcalls(void)
5419*4882a593Smuzhiyun {
5420*4882a593Smuzhiyun 	int i;
5421*4882a593Smuzhiyun 	unsigned int hcall;
5422*4882a593Smuzhiyun 
5423*4882a593Smuzhiyun 	for (i = 0; default_hcall_list[i]; ++i) {
5424*4882a593Smuzhiyun 		hcall = default_hcall_list[i];
5425*4882a593Smuzhiyun 		WARN_ON(!kvmppc_hcall_impl_hv(hcall));
5426*4882a593Smuzhiyun 		__set_bit(hcall / 4, default_enabled_hcalls);
5427*4882a593Smuzhiyun 	}
5428*4882a593Smuzhiyun }
5429*4882a593Smuzhiyun 
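/*
 * Handler for the KVM_PPC_CONFIGURE_V3_MMU ioctl: validate the
 * requested radix/hash mode and process table value, switch MMU mode
 * if it changed, and update the partition table and LPCR to match.
 */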
5430*4882a593Smuzhiyun static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
5431*4882a593Smuzhiyun {
5432*4882a593Smuzhiyun 	unsigned long lpcr;
5433*4882a593Smuzhiyun 	int radix;
5434*4882a593Smuzhiyun 	int err;
5435*4882a593Smuzhiyun 
5436*4882a593Smuzhiyun 	/* If not on a POWER9, reject it */
5437*4882a593Smuzhiyun 	if (!cpu_has_feature(CPU_FTR_ARCH_300))
5438*4882a593Smuzhiyun 		return -ENODEV;
5439*4882a593Smuzhiyun 
5440*4882a593Smuzhiyun 	/* If any unknown flags set, reject it */
5441*4882a593Smuzhiyun 	if (cfg->flags & ~(KVM_PPC_MMUV3_RADIX | KVM_PPC_MMUV3_GTSE))
5442*4882a593Smuzhiyun 		return -EINVAL;
5443*4882a593Smuzhiyun 
5444*4882a593Smuzhiyun 	/* GR (guest radix) bit in process_table field must match */
5445*4882a593Smuzhiyun 	radix = !!(cfg->flags & KVM_PPC_MMUV3_RADIX);
5446*4882a593Smuzhiyun 	if (!!(cfg->process_table & PATB_GR) != radix)
5447*4882a593Smuzhiyun 		return -EINVAL;
5448*4882a593Smuzhiyun 
5449*4882a593Smuzhiyun 	/* Process table size field must be reasonable, i.e. <= 24 */
5450*4882a593Smuzhiyun 	if ((cfg->process_table & PRTS_MASK) > 24)
5451*4882a593Smuzhiyun 		return -EINVAL;
5452*4882a593Smuzhiyun 
5453*4882a593Smuzhiyun 	/* We can change a guest to/from radix now, if the host is radix */
5454*4882a593Smuzhiyun 	if (radix && !radix_enabled())
5455*4882a593Smuzhiyun 		return -EINVAL;
5456*4882a593Smuzhiyun 
5457*4882a593Smuzhiyun 	/* If we're a nested hypervisor, we currently only support radix */
5458*4882a593Smuzhiyun 	if (kvmhv_on_pseries() && !radix)
5459*4882a593Smuzhiyun 		return -EINVAL;
5460*4882a593Smuzhiyun 
5461*4882a593Smuzhiyun 	mutex_lock(&kvm->arch.mmu_setup_lock);
5462*4882a593Smuzhiyun 	if (radix != kvm_is_radix(kvm)) {
5463*4882a593Smuzhiyun 		if (kvm->arch.mmu_ready) {
5464*4882a593Smuzhiyun 			kvm->arch.mmu_ready = 0;
5465*4882a593Smuzhiyun 			/* order mmu_ready vs. vcpus_running */
5466*4882a593Smuzhiyun 			smp_mb();
5467*4882a593Smuzhiyun 			if (atomic_read(&kvm->arch.vcpus_running)) {
5468*4882a593Smuzhiyun 				kvm->arch.mmu_ready = 1;
5469*4882a593Smuzhiyun 				err = -EBUSY;
5470*4882a593Smuzhiyun 				goto out_unlock;
5471*4882a593Smuzhiyun 			}
5472*4882a593Smuzhiyun 		}
5473*4882a593Smuzhiyun 		if (radix)
5474*4882a593Smuzhiyun 			err = kvmppc_switch_mmu_to_radix(kvm);
5475*4882a593Smuzhiyun 		else
5476*4882a593Smuzhiyun 			err = kvmppc_switch_mmu_to_hpt(kvm);
5477*4882a593Smuzhiyun 		if (err)
5478*4882a593Smuzhiyun 			goto out_unlock;
5479*4882a593Smuzhiyun 	}
5480*4882a593Smuzhiyun 
5481*4882a593Smuzhiyun 	kvm->arch.process_table = cfg->process_table;
5482*4882a593Smuzhiyun 	kvmppc_setup_partition_table(kvm);
5483*4882a593Smuzhiyun 
5484*4882a593Smuzhiyun 	lpcr = (cfg->flags & KVM_PPC_MMUV3_GTSE) ? LPCR_GTSE : 0;
5485*4882a593Smuzhiyun 	kvmppc_update_lpcr(kvm, lpcr, LPCR_GTSE);
5486*4882a593Smuzhiyun 	err = 0;
5487*4882a593Smuzhiyun 
5488*4882a593Smuzhiyun  out_unlock:
5489*4882a593Smuzhiyun 	mutex_unlock(&kvm->arch.mmu_setup_lock);
5490*4882a593Smuzhiyun 	return err;
5491*4882a593Smuzhiyun }
5492*4882a593Smuzhiyun 
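/*
 * Enable the nested-HV capability (called with kvm == NULL to just
 * test for support): requires the "nested" module parameter and a
 * POWER9-level (ISA v3.0) host.
 */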
5493*4882a593Smuzhiyun static int kvmhv_enable_nested(struct kvm *kvm)
5494*4882a593Smuzhiyun {
5495*4882a593Smuzhiyun 	if (!nested)
5496*4882a593Smuzhiyun 		return -EPERM;
5497*4882a593Smuzhiyun 	if (!cpu_has_feature(CPU_FTR_ARCH_300) || no_mixing_hpt_and_radix)
5498*4882a593Smuzhiyun 		return -ENODEV;
5499*4882a593Smuzhiyun 
5500*4882a593Smuzhiyun 	/* kvm == NULL means the caller is testing if the capability exists */
5501*4882a593Smuzhiyun 	if (kvm)
5502*4882a593Smuzhiyun 		kvm->arch.nested_enable = true;
5503*4882a593Smuzhiyun 	return 0;
5504*4882a593Smuzhiyun }
5505*4882a593Smuzhiyun 
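/*
 * Copy data from/to a guest effective address, used when emulating
 * loads and stores.  Only radix guests are handled here; for a nested
 * guest we return -EAGAIN because quadrants are currently the only way
 * to access nested guest memory.
 */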
5506*4882a593Smuzhiyun static int kvmhv_load_from_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
5507*4882a593Smuzhiyun 				 int size)
5508*4882a593Smuzhiyun {
5509*4882a593Smuzhiyun 	int rc = -EINVAL;
5510*4882a593Smuzhiyun 
5511*4882a593Smuzhiyun 	if (kvmhv_vcpu_is_radix(vcpu)) {
5512*4882a593Smuzhiyun 		rc = kvmhv_copy_from_guest_radix(vcpu, *eaddr, ptr, size);
5513*4882a593Smuzhiyun 
5514*4882a593Smuzhiyun 		if (rc > 0)
5515*4882a593Smuzhiyun 			rc = -EINVAL;
5516*4882a593Smuzhiyun 	}
5517*4882a593Smuzhiyun 
5518*4882a593Smuzhiyun 	/* For now quadrants are the only way to access nested guest memory */
5519*4882a593Smuzhiyun 	if (rc && vcpu->arch.nested)
5520*4882a593Smuzhiyun 		rc = -EAGAIN;
5521*4882a593Smuzhiyun 
5522*4882a593Smuzhiyun 	return rc;
5523*4882a593Smuzhiyun }
5524*4882a593Smuzhiyun 
5525*4882a593Smuzhiyun static int kvmhv_store_to_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
5526*4882a593Smuzhiyun 				int size)
5527*4882a593Smuzhiyun {
5528*4882a593Smuzhiyun 	int rc = -EINVAL;
5529*4882a593Smuzhiyun 
5530*4882a593Smuzhiyun 	if (kvmhv_vcpu_is_radix(vcpu)) {
5531*4882a593Smuzhiyun 		rc = kvmhv_copy_to_guest_radix(vcpu, *eaddr, ptr, size);
5532*4882a593Smuzhiyun 
5533*4882a593Smuzhiyun 		if (rc > 0)
5534*4882a593Smuzhiyun 			rc = -EINVAL;
5535*4882a593Smuzhiyun 	}
5536*4882a593Smuzhiyun 
5537*4882a593Smuzhiyun 	/* For now quadrants are the only way to access nested guest memory */
5538*4882a593Smuzhiyun 	if (rc && vcpu->arch.nested)
5539*4882a593Smuzhiyun 		rc = -EAGAIN;
5540*4882a593Smuzhiyun 
5541*4882a593Smuzhiyun 	return rc;
5542*4882a593Smuzhiyun }
5543*4882a593Smuzhiyun 
5544*4882a593Smuzhiyun static void unpin_vpa_reset(struct kvm *kvm, struct kvmppc_vpa *vpa)
5545*4882a593Smuzhiyun {
5546*4882a593Smuzhiyun 	unpin_vpa(kvm, vpa);
5547*4882a593Smuzhiyun 	vpa->gpa = 0;
5548*4882a593Smuzhiyun 	vpa->pinned_addr = NULL;
5549*4882a593Smuzhiyun 	vpa->dirty = false;
5550*4882a593Smuzhiyun 	vpa->update_pending = 0;
5551*4882a593Smuzhiyun }
5552*4882a593Smuzhiyun 
5553*4882a593Smuzhiyun /*
5554*4882a593Smuzhiyun  * Enable a guest to become a secure VM, or test whether
5555*4882a593Smuzhiyun  * that could be enabled.
5556*4882a593Smuzhiyun  * Called when the KVM_CAP_PPC_SECURE_GUEST capability is
5557*4882a593Smuzhiyun  * tested (kvm == NULL) or enabled (kvm != NULL).
5558*4882a593Smuzhiyun  */
5559*4882a593Smuzhiyun static int kvmhv_enable_svm(struct kvm *kvm)
5560*4882a593Smuzhiyun {
5561*4882a593Smuzhiyun 	if (!kvmppc_uvmem_available())
5562*4882a593Smuzhiyun 		return -EINVAL;
5563*4882a593Smuzhiyun 	if (kvm)
5564*4882a593Smuzhiyun 		kvm->arch.svm_enabled = 1;
5565*4882a593Smuzhiyun 	return 0;
5566*4882a593Smuzhiyun }
5567*4882a593Smuzhiyun 
5568*4882a593Smuzhiyun /*
5569*4882a593Smuzhiyun  *  IOCTL handler to turn off secure mode of guest
5570*4882a593Smuzhiyun  *
5571*4882a593Smuzhiyun  * - Release all device pages
5572*4882a593Smuzhiyun  * - Issue ucall to terminate the guest on the UV side
5573*4882a593Smuzhiyun  * - Unpin the VPA pages.
5574*4882a593Smuzhiyun  * - Reinit the partition scoped page tables
5575*4882a593Smuzhiyun  */
5576*4882a593Smuzhiyun static int kvmhv_svm_off(struct kvm *kvm)
5577*4882a593Smuzhiyun {
5578*4882a593Smuzhiyun 	struct kvm_vcpu *vcpu;
5579*4882a593Smuzhiyun 	int mmu_was_ready;
5580*4882a593Smuzhiyun 	int srcu_idx;
5581*4882a593Smuzhiyun 	int ret = 0;
5582*4882a593Smuzhiyun 	int i;
5583*4882a593Smuzhiyun 
5584*4882a593Smuzhiyun 	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
5585*4882a593Smuzhiyun 		return ret;
5586*4882a593Smuzhiyun 
5587*4882a593Smuzhiyun 	mutex_lock(&kvm->arch.mmu_setup_lock);
5588*4882a593Smuzhiyun 	mmu_was_ready = kvm->arch.mmu_ready;
5589*4882a593Smuzhiyun 	if (kvm->arch.mmu_ready) {
5590*4882a593Smuzhiyun 		kvm->arch.mmu_ready = 0;
5591*4882a593Smuzhiyun 		/* order mmu_ready vs. vcpus_running */
5592*4882a593Smuzhiyun 		smp_mb();
5593*4882a593Smuzhiyun 		if (atomic_read(&kvm->arch.vcpus_running)) {
5594*4882a593Smuzhiyun 			kvm->arch.mmu_ready = 1;
5595*4882a593Smuzhiyun 			ret = -EBUSY;
5596*4882a593Smuzhiyun 			goto out;
5597*4882a593Smuzhiyun 		}
5598*4882a593Smuzhiyun 	}
5599*4882a593Smuzhiyun 
5600*4882a593Smuzhiyun 	srcu_idx = srcu_read_lock(&kvm->srcu);
5601*4882a593Smuzhiyun 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
5602*4882a593Smuzhiyun 		struct kvm_memory_slot *memslot;
5603*4882a593Smuzhiyun 		struct kvm_memslots *slots = __kvm_memslots(kvm, i);
5604*4882a593Smuzhiyun 
5605*4882a593Smuzhiyun 		if (!slots)
5606*4882a593Smuzhiyun 			continue;
5607*4882a593Smuzhiyun 
5608*4882a593Smuzhiyun 		kvm_for_each_memslot(memslot, slots) {
5609*4882a593Smuzhiyun 			kvmppc_uvmem_drop_pages(memslot, kvm, true);
5610*4882a593Smuzhiyun 			uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
5611*4882a593Smuzhiyun 		}
5612*4882a593Smuzhiyun 	}
5613*4882a593Smuzhiyun 	srcu_read_unlock(&kvm->srcu, srcu_idx);
5614*4882a593Smuzhiyun 
5615*4882a593Smuzhiyun 	ret = uv_svm_terminate(kvm->arch.lpid);
5616*4882a593Smuzhiyun 	if (ret != U_SUCCESS) {
5617*4882a593Smuzhiyun 		ret = -EINVAL;
5618*4882a593Smuzhiyun 		goto out;
5619*4882a593Smuzhiyun 	}
5620*4882a593Smuzhiyun 
5621*4882a593Smuzhiyun 	/*
5622*4882a593Smuzhiyun 	 * When secure guest is reset, all the guest pages are sent
5623*4882a593Smuzhiyun 	 * to UV via UV_PAGE_IN before the non-boot vcpus get a
5624*4882a593Smuzhiyun 	 * chance to run and unpin their VPA pages. Unpinning of all
5625*4882a593Smuzhiyun 	 * VPA pages is done here explicitly so that VPA pages
5626*4882a593Smuzhiyun 	 * can be migrated to the secure side.
5627*4882a593Smuzhiyun 	 *
5628*4882a593Smuzhiyun 	 * This is required for the secure SMP guest to reboot
5629*4882a593Smuzhiyun 	 * correctly.
5630*4882a593Smuzhiyun 	 */
5631*4882a593Smuzhiyun 	kvm_for_each_vcpu(i, vcpu, kvm) {
5632*4882a593Smuzhiyun 		spin_lock(&vcpu->arch.vpa_update_lock);
5633*4882a593Smuzhiyun 		unpin_vpa_reset(kvm, &vcpu->arch.dtl);
5634*4882a593Smuzhiyun 		unpin_vpa_reset(kvm, &vcpu->arch.slb_shadow);
5635*4882a593Smuzhiyun 		unpin_vpa_reset(kvm, &vcpu->arch.vpa);
5636*4882a593Smuzhiyun 		spin_unlock(&vcpu->arch.vpa_update_lock);
5637*4882a593Smuzhiyun 	}
5638*4882a593Smuzhiyun 
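	/* Reinit the partition-scoped page tables and drop the secure state. */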
5639*4882a593Smuzhiyun 	kvmppc_setup_partition_table(kvm);
5640*4882a593Smuzhiyun 	kvm->arch.secure_guest = 0;
5641*4882a593Smuzhiyun 	kvm->arch.mmu_ready = mmu_was_ready;
5642*4882a593Smuzhiyun out:
5643*4882a593Smuzhiyun 	mutex_unlock(&kvm->arch.mmu_setup_lock);
5644*4882a593Smuzhiyun 	return ret;
5645*4882a593Smuzhiyun }
5646*4882a593Smuzhiyun 
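/*
 * HV-specific callbacks, hooked into the generic PPC KVM layer via the
 * kvmppc_hv_ops assignment in kvmppc_book3s_init_hv() below.
 */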
5647*4882a593Smuzhiyun static struct kvmppc_ops kvm_ops_hv = {
5648*4882a593Smuzhiyun 	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
5649*4882a593Smuzhiyun 	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
5650*4882a593Smuzhiyun 	.get_one_reg = kvmppc_get_one_reg_hv,
5651*4882a593Smuzhiyun 	.set_one_reg = kvmppc_set_one_reg_hv,
5652*4882a593Smuzhiyun 	.vcpu_load   = kvmppc_core_vcpu_load_hv,
5653*4882a593Smuzhiyun 	.vcpu_put    = kvmppc_core_vcpu_put_hv,
5654*4882a593Smuzhiyun 	.inject_interrupt = kvmppc_inject_interrupt_hv,
5655*4882a593Smuzhiyun 	.set_msr     = kvmppc_set_msr_hv,
5656*4882a593Smuzhiyun 	.vcpu_run    = kvmppc_vcpu_run_hv,
5657*4882a593Smuzhiyun 	.vcpu_create = kvmppc_core_vcpu_create_hv,
5658*4882a593Smuzhiyun 	.vcpu_free   = kvmppc_core_vcpu_free_hv,
5659*4882a593Smuzhiyun 	.check_requests = kvmppc_core_check_requests_hv,
5660*4882a593Smuzhiyun 	.get_dirty_log  = kvm_vm_ioctl_get_dirty_log_hv,
5661*4882a593Smuzhiyun 	.flush_memslot  = kvmppc_core_flush_memslot_hv,
5662*4882a593Smuzhiyun 	.prepare_memory_region = kvmppc_core_prepare_memory_region_hv,
5663*4882a593Smuzhiyun 	.commit_memory_region  = kvmppc_core_commit_memory_region_hv,
5664*4882a593Smuzhiyun 	.unmap_hva_range = kvm_unmap_hva_range_hv,
5665*4882a593Smuzhiyun 	.age_hva  = kvm_age_hva_hv,
5666*4882a593Smuzhiyun 	.test_age_hva = kvm_test_age_hva_hv,
5667*4882a593Smuzhiyun 	.set_spte_hva = kvm_set_spte_hva_hv,
5668*4882a593Smuzhiyun 	.free_memslot = kvmppc_core_free_memslot_hv,
5669*4882a593Smuzhiyun 	.init_vm =  kvmppc_core_init_vm_hv,
5670*4882a593Smuzhiyun 	.destroy_vm = kvmppc_core_destroy_vm_hv,
5671*4882a593Smuzhiyun 	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv,
5672*4882a593Smuzhiyun 	.emulate_op = kvmppc_core_emulate_op_hv,
5673*4882a593Smuzhiyun 	.emulate_mtspr = kvmppc_core_emulate_mtspr_hv,
5674*4882a593Smuzhiyun 	.emulate_mfspr = kvmppc_core_emulate_mfspr_hv,
5675*4882a593Smuzhiyun 	.fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
5676*4882a593Smuzhiyun 	.arch_vm_ioctl  = kvm_arch_vm_ioctl_hv,
5677*4882a593Smuzhiyun 	.hcall_implemented = kvmppc_hcall_impl_hv,
5678*4882a593Smuzhiyun #ifdef CONFIG_KVM_XICS
5679*4882a593Smuzhiyun 	.irq_bypass_add_producer = kvmppc_irq_bypass_add_producer_hv,
5680*4882a593Smuzhiyun 	.irq_bypass_del_producer = kvmppc_irq_bypass_del_producer_hv,
5681*4882a593Smuzhiyun #endif
5682*4882a593Smuzhiyun 	.configure_mmu = kvmhv_configure_mmu,
5683*4882a593Smuzhiyun 	.get_rmmu_info = kvmhv_get_rmmu_info,
5684*4882a593Smuzhiyun 	.set_smt_mode = kvmhv_set_smt_mode,
5685*4882a593Smuzhiyun 	.enable_nested = kvmhv_enable_nested,
5686*4882a593Smuzhiyun 	.load_from_eaddr = kvmhv_load_from_eaddr,
5687*4882a593Smuzhiyun 	.store_to_eaddr = kvmhv_store_to_eaddr,
5688*4882a593Smuzhiyun 	.enable_svm = kvmhv_enable_svm,
5689*4882a593Smuzhiyun 	.svm_off = kvmhv_svm_off,
5690*4882a593Smuzhiyun };
5691*4882a593Smuzhiyun 
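/*
 * Allocate one sibling_subcore_state per core (node-local) and point
 * every thread's paca at it, so sibling threads of a core can share
 * subcore state.
 */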
5692*4882a593Smuzhiyun static int kvm_init_subcore_bitmap(void)
5693*4882a593Smuzhiyun {
5694*4882a593Smuzhiyun 	int i, j;
5695*4882a593Smuzhiyun 	int nr_cores = cpu_nr_cores();
5696*4882a593Smuzhiyun 	struct sibling_subcore_state *sibling_subcore_state;
5697*4882a593Smuzhiyun 
5698*4882a593Smuzhiyun 	for (i = 0; i < nr_cores; i++) {
5699*4882a593Smuzhiyun 		int first_cpu = i * threads_per_core;
5700*4882a593Smuzhiyun 		int node = cpu_to_node(first_cpu);
5701*4882a593Smuzhiyun 
5702*4882a593Smuzhiyun 		/* Ignore if it is already allocated. */
5703*4882a593Smuzhiyun 		if (paca_ptrs[first_cpu]->sibling_subcore_state)
5704*4882a593Smuzhiyun 			continue;
5705*4882a593Smuzhiyun 
5706*4882a593Smuzhiyun 		sibling_subcore_state =
5707*4882a593Smuzhiyun 			kzalloc_node(sizeof(struct sibling_subcore_state),
5708*4882a593Smuzhiyun 							GFP_KERNEL, node);
5709*4882a593Smuzhiyun 		if (!sibling_subcore_state)
5710*4882a593Smuzhiyun 			return -ENOMEM;
5711*4882a593Smuzhiyun 
5712*4882a593Smuzhiyun 
5713*4882a593Smuzhiyun 		for (j = 0; j < threads_per_core; j++) {
5714*4882a593Smuzhiyun 			int cpu = first_cpu + j;
5715*4882a593Smuzhiyun 
5716*4882a593Smuzhiyun 			paca_ptrs[cpu]->sibling_subcore_state =
5717*4882a593Smuzhiyun 						sibling_subcore_state;
5718*4882a593Smuzhiyun 		}
5719*4882a593Smuzhiyun 	}
5720*4882a593Smuzhiyun 	return 0;
5721*4882a593Smuzhiyun }
5722*4882a593Smuzhiyun 
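/* Radix guest support needs an ISA v3.0 (POWER9) host with the radix MMU enabled. */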
5723*4882a593Smuzhiyun static int kvmppc_radix_possible(void)
5724*4882a593Smuzhiyun {
5725*4882a593Smuzhiyun 	return cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled();
5726*4882a593Smuzhiyun }
5727*4882a593Smuzhiyun 
5728*4882a593Smuzhiyun static int kvmppc_book3s_init_hv(void)
5729*4882a593Smuzhiyun {
5730*4882a593Smuzhiyun 	int r;
5731*4882a593Smuzhiyun 
5732*4882a593Smuzhiyun 	if (!tlbie_capable) {
5733*4882a593Smuzhiyun 		pr_err("KVM-HV: Host does not support TLBIE\n");
5734*4882a593Smuzhiyun 		return -ENODEV;
5735*4882a593Smuzhiyun 	}
5736*4882a593Smuzhiyun 
5737*4882a593Smuzhiyun 	/*
5738*4882a593Smuzhiyun 	 * FIXME!! Do we need to check on all CPUs?
5739*4882a593Smuzhiyun 	 */
5740*4882a593Smuzhiyun 	r = kvmppc_core_check_processor_compat_hv();
5741*4882a593Smuzhiyun 	if (r < 0)
5742*4882a593Smuzhiyun 		return -ENODEV;
5743*4882a593Smuzhiyun 
5744*4882a593Smuzhiyun 	r = kvmhv_nested_init();
5745*4882a593Smuzhiyun 	if (r)
5746*4882a593Smuzhiyun 		return r;
5747*4882a593Smuzhiyun 
5748*4882a593Smuzhiyun 	r = kvm_init_subcore_bitmap();
5749*4882a593Smuzhiyun 	if (r)
5750*4882a593Smuzhiyun 		return r;
5751*4882a593Smuzhiyun 
5752*4882a593Smuzhiyun 	/*
5753*4882a593Smuzhiyun 	 * We need a way of accessing the XICS interrupt controller,
5754*4882a593Smuzhiyun 	 * either directly, via paca_ptrs[cpu]->kvm_hstate.xics_phys, or
5755*4882a593Smuzhiyun 	 * indirectly, via OPAL.
5756*4882a593Smuzhiyun 	 */
5757*4882a593Smuzhiyun #ifdef CONFIG_SMP
5758*4882a593Smuzhiyun 	if (!xics_on_xive() && !kvmhv_on_pseries() &&
5759*4882a593Smuzhiyun 	    !local_paca->kvm_hstate.xics_phys) {
5760*4882a593Smuzhiyun 		struct device_node *np;
5761*4882a593Smuzhiyun 
5762*4882a593Smuzhiyun 		np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
5763*4882a593Smuzhiyun 		if (!np) {
5764*4882a593Smuzhiyun 			pr_err("KVM-HV: Cannot determine method for accessing XICS\n");
5765*4882a593Smuzhiyun 			return -ENODEV;
5766*4882a593Smuzhiyun 		}
5767*4882a593Smuzhiyun 		/* presence of intc confirmed - node can be dropped again */
5768*4882a593Smuzhiyun 		of_node_put(np);
5769*4882a593Smuzhiyun 	}
5770*4882a593Smuzhiyun #endif
5771*4882a593Smuzhiyun 
5772*4882a593Smuzhiyun 	kvm_ops_hv.owner = THIS_MODULE;
5773*4882a593Smuzhiyun 	kvmppc_hv_ops = &kvm_ops_hv;
5774*4882a593Smuzhiyun 
5775*4882a593Smuzhiyun 	init_default_hcalls();
5776*4882a593Smuzhiyun 
5777*4882a593Smuzhiyun 	init_vcore_lists();
5778*4882a593Smuzhiyun 
5779*4882a593Smuzhiyun 	r = kvmppc_mmu_hv_init();
5780*4882a593Smuzhiyun 	if (r)
5781*4882a593Smuzhiyun 		return r;
5782*4882a593Smuzhiyun 
5783*4882a593Smuzhiyun 	if (kvmppc_radix_possible()) {
5784*4882a593Smuzhiyun 		r = kvmppc_radix_init();
5785*4882a593Smuzhiyun 		if (r)
5786*4882a593Smuzhiyun 			return r;
5787*4882a593Smuzhiyun 	}
5788*4882a593Smuzhiyun 
5789*4882a593Smuzhiyun 	/*
5790*4882a593Smuzhiyun 	 * POWER9 chips before version 2.02 can't have some threads in
5791*4882a593Smuzhiyun 	 * HPT mode and some in radix mode on the same core.
5792*4882a593Smuzhiyun 	 */
5793*4882a593Smuzhiyun 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
5794*4882a593Smuzhiyun 		unsigned int pvr = mfspr(SPRN_PVR);
5795*4882a593Smuzhiyun 		if ((pvr >> 16) == PVR_POWER9 &&
5796*4882a593Smuzhiyun 		    (((pvr & 0xe000) == 0 && (pvr & 0xfff) < 0x202) ||
5797*4882a593Smuzhiyun 		     ((pvr & 0xe000) == 0x2000 && (pvr & 0xfff) < 0x101)))
5798*4882a593Smuzhiyun 			no_mixing_hpt_and_radix = true;
5799*4882a593Smuzhiyun 	}
5800*4882a593Smuzhiyun 
5801*4882a593Smuzhiyun 	r = kvmppc_uvmem_init();
5802*4882a593Smuzhiyun 	if (r < 0)
5803*4882a593Smuzhiyun 		pr_err("KVM-HV: kvmppc_uvmem_init failed %d\n", r);
5804*4882a593Smuzhiyun 
5805*4882a593Smuzhiyun 	return r;
5806*4882a593Smuzhiyun }
5807*4882a593Smuzhiyun 
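/* Module teardown: release the resources set up in kvmppc_book3s_init_hv(). */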
5808*4882a593Smuzhiyun static void kvmppc_book3s_exit_hv(void)
5809*4882a593Smuzhiyun {
5810*4882a593Smuzhiyun 	kvmppc_uvmem_free();
5811*4882a593Smuzhiyun 	kvmppc_free_host_rm_ops();
5812*4882a593Smuzhiyun 	if (kvmppc_radix_possible())
5813*4882a593Smuzhiyun 		kvmppc_radix_exit();
5814*4882a593Smuzhiyun 	kvmppc_hv_ops = NULL;
5815*4882a593Smuzhiyun 	kvmhv_nested_exit();
5816*4882a593Smuzhiyun }
5817*4882a593Smuzhiyun 
5818*4882a593Smuzhiyun module_init(kvmppc_book3s_init_hv);
5819*4882a593Smuzhiyun module_exit(kvmppc_book3s_exit_hv);
5820*4882a593Smuzhiyun MODULE_LICENSE("GPL");
5821*4882a593Smuzhiyun MODULE_ALIAS_MISCDEV(KVM_MINOR);
5822*4882a593Smuzhiyun MODULE_ALIAS("devname:kvm");