// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif
#include <asm/ultravisor.h>

#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);


int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_runnable(vcpu);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world. Call with interrupts
 * enabled; interrupts are hard-disabled here and stay disabled if we
 * go on to enter the guest.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (kvm_request_pending(vcpu)) {
			/* Make sure we process requests with preemption enabled */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

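/*
 * Handle a KVM/ePAPR paravirtual hypercall from the guest. As the code
 * below assumes, the guest passes the hypercall number in r11 and up to
 * four arguments in r3-r6 (truncated to 32 bits when MSR_SF is clear).
 * The second return value is written to r4 here; the first return value
 * is this function's return code, which the caller is expected to place
 * in r3 per the ePAPR convention.
 */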
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void*)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r = -EINVAL;

	vcpu->stat.st++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
							    size);

	if ((!r) || (r == -EAGAIN))
		return r;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc = -EINVAL;

	vcpu->stat.ld++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
							      size);

	if ((!rc) || (rc == -EAGAIN))
		return rc;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	if (rc)
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void *opaque)
{
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_SET_GUEST_DEBUG:
		r = 1;
		break;
	case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
		r = 1;
		break;
	case KVM_CAP_SPAPR_TCE_VFIO:
		r = !!cpu_has_feature(CPU_FTR_HVMODE);
		break;
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
		r = 1;
		break;
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE:
		/*
		 * We need XIVE to be enabled on the platform (implies
		 * a POWER9 processor) and the PowerNV platform, as
		 * nested is not yet supported.
		 */
		r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
			kvmppc_xive_native_supported();
		break;
#endif

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				/* P9 can emulate dbells, so allow any mode */
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) &&
		       cpu_has_feature(CPU_FTR_HVMODE));
		break;
	case KVM_CAP_PPC_NESTED_HV:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
		       !kvmppc_hv_ops->enable_nested(NULL));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_ID;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_CAP_PPC_HTM:
		r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
		break;
#endif
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
	case KVM_CAP_PPC_SECURE_GUEST:
		r = hv_enabled && kvmppc_hv_ops->enable_svm &&
			!kvmppc_hv_ops->enable_svm(NULL);
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;

}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvmppc_core_free_memslot(kvm, slot);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem, change);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new, change);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = get_tb();

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	err = kvmppc_subarch_vcpu_init(vcpu);
	if (err)
		return err;

	err = kvmppc_core_vcpu_create(vcpu);
	if (err)
		goto out_vcpu_uninit;

	vcpu->arch.waitp = &vcpu->wait;
	kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id);
	return 0;

out_vcpu_uninit:
	kvmppc_subarch_vcpu_uninit(vcpu);
	return err;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xics_on_xive())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	case KVMPPC_IRQ_XIVE:
		kvmppc_xive_native_cleanup_vcpu(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);

	kvmppc_subarch_vcpu_uninit(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

#ifdef CONFIG_VSX
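/*
 * MMIO helpers for VSX loads. Note the register numbering assumed by the
 * code below: VSX registers 0-31 overlay the FPRs (accessed via
 * VCPU_VSX_FPR) and registers 32-63 overlay the Altivec VRs (accessed via
 * VCPU_VSX_VR), which is why an index >= 32 selects the VR array. The
 * *_offset helpers translate an element index into the host's in-register
 * layout, accounting for host endianness.
 */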
static inline int kvmppc_get_vsr_dword_offset(int index)
{
	int offset;

	if ((index != 0) && (index != 1))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 1 - index;
#endif

	return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
	int offset;

	if ((index > 3) || (index < 0))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 3 - index;
#endif
	return offset;
}

static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[offset] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
	}
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[0] = gpr;
		val.vsxval[1] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
	}
}

static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
	u32 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		val.vsx32val[2] = gpr;
		val.vsx32val[3] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
		VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
	}
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	int dword_offset, word_offset;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsx32val[offset] = gpr32;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		dword_offset = offset / 2;
		word_offset = offset % 2;
		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
		val.vsx32val[word_offset] = gpr32;
		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
	}
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
		int index, int element_size)
{
	int offset;
	int elts = sizeof(vector128)/element_size;

	if ((index < 0) || (index >= elts))
		return -1;

	if (kvmppc_need_byteswap(vcpu))
		offset = elts - index - 1;
	else
		offset = index;

	return offset;
}

static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
}

static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
}

static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
}

static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
}


static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_dword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsxval[offset] = gpr;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_word_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx32val[offset] = gpr32;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
	u16 gpr16)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_hword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx16val[offset] = gpr16;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
	u8 gpr8)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_byte_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx8val[offset] = gpr8;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_PPC_FPU
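/*
 * Single <-> double precision conversion helpers for FP MMIO emulation.
 * They let the FPU do the conversion: lfs loads a single-precision value
 * and expands it to double precision in fr0, lfd loads a double, and
 * stfd/stfs store it back in the wanted format, so rounding and denormal
 * handling match what the hardware would do for the guest's own float
 * accesses. Kernel FP use must be bracketed by preempt_disable() and
 * enable_kernel_fp().
 */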
static inline u64 sp_to_dp(u32 fprs)
{
	u64 fprd;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
	     : "fr0");
	preempt_enable();
	return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
	u32 fprs;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
	     : "fr0");
	preempt_enable();
	return fprs;
}

#else
#define sp_to_dp(x)	(x)
#define dp_to_sp(x)	(x)
#endif /* CONFIG_PPC_FPU */

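/*
 * Complete a pending emulated MMIO load: take the data provided either by
 * an in-kernel device or by userspace via vcpu->run->mmio.data, undo any
 * host byteswap, apply the single-to-double and sign extensions recorded
 * when the load was started, and write the result into whichever register
 * class vcpu->arch.io_gpr encodes (GPR, FPR, QPR, VSX or VMX).
 */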
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u64 gpr;

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	/* conversion between single and double precision */
	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
		gpr = sp_to_dp(gpr);

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);

		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
#ifdef CONFIG_VSX
	case KVM_MMIO_REG_VSX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
			kvmppc_set_vsr_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
			kvmppc_set_vsr_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
			kvmppc_set_vsr_dword_dump(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
			kvmppc_set_vsr_word_dump(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case KVM_MMIO_REG_VMX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
			kvmppc_set_vmx_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
			kvmppc_set_vmx_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_HWORD)
			kvmppc_set_vmx_hword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_BYTE)
			kvmppc_set_vmx_byte(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_MMIO_REG_NESTED_GPR:
		if (kvmppc_need_byteswap(vcpu))
			gpr = swab64(gpr);
		kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
				     sizeof(gpr));
		break;
#endif
	default:
		BUG();
	}
}

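/*
 * Start an emulated MMIO load. The access parameters are recorded in
 * vcpu->arch and vcpu->run->mmio, then the in-kernel MMIO bus is tried
 * first; if a device claims the address the load is completed immediately
 * and EMULATE_DONE is returned, otherwise EMULATE_DO_MMIO is returned so
 * the exit to userspace with KVM_EXIT_MMIO can be taken.
 */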
__kvmppc_handle_load(struct kvm_vcpu * vcpu,unsigned int rt,unsigned int bytes,int is_default_endian,int sign_extend)1225*4882a593Smuzhiyun static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
1226*4882a593Smuzhiyun unsigned int rt, unsigned int bytes,
1227*4882a593Smuzhiyun int is_default_endian, int sign_extend)
1228*4882a593Smuzhiyun {
1229*4882a593Smuzhiyun struct kvm_run *run = vcpu->run;
1230*4882a593Smuzhiyun int idx, ret;
1231*4882a593Smuzhiyun bool host_swabbed;
1232*4882a593Smuzhiyun
1233*4882a593Smuzhiyun /* Pity C doesn't have a logical XOR operator */
1234*4882a593Smuzhiyun if (kvmppc_need_byteswap(vcpu)) {
1235*4882a593Smuzhiyun host_swabbed = is_default_endian;
1236*4882a593Smuzhiyun } else {
1237*4882a593Smuzhiyun host_swabbed = !is_default_endian;
1238*4882a593Smuzhiyun }
1239*4882a593Smuzhiyun
1240*4882a593Smuzhiyun if (bytes > sizeof(run->mmio.data)) {
1241*4882a593Smuzhiyun printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
1242*4882a593Smuzhiyun run->mmio.len);
1243*4882a593Smuzhiyun }
1244*4882a593Smuzhiyun
1245*4882a593Smuzhiyun run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1246*4882a593Smuzhiyun run->mmio.len = bytes;
1247*4882a593Smuzhiyun run->mmio.is_write = 0;
1248*4882a593Smuzhiyun
1249*4882a593Smuzhiyun vcpu->arch.io_gpr = rt;
1250*4882a593Smuzhiyun vcpu->arch.mmio_host_swabbed = host_swabbed;
1251*4882a593Smuzhiyun vcpu->mmio_needed = 1;
1252*4882a593Smuzhiyun vcpu->mmio_is_write = 0;
1253*4882a593Smuzhiyun vcpu->arch.mmio_sign_extend = sign_extend;
1254*4882a593Smuzhiyun
1255*4882a593Smuzhiyun idx = srcu_read_lock(&vcpu->kvm->srcu);
1256*4882a593Smuzhiyun
1257*4882a593Smuzhiyun ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1258*4882a593Smuzhiyun bytes, &run->mmio.data);
1259*4882a593Smuzhiyun
1260*4882a593Smuzhiyun srcu_read_unlock(&vcpu->kvm->srcu, idx);
1261*4882a593Smuzhiyun
1262*4882a593Smuzhiyun if (!ret) {
1263*4882a593Smuzhiyun kvmppc_complete_mmio_load(vcpu);
1264*4882a593Smuzhiyun vcpu->mmio_needed = 0;
1265*4882a593Smuzhiyun return EMULATE_DONE;
1266*4882a593Smuzhiyun }
1267*4882a593Smuzhiyun
1268*4882a593Smuzhiyun return EMULATE_DO_MMIO;
1269*4882a593Smuzhiyun }
1270*4882a593Smuzhiyun
kvmppc_handle_load(struct kvm_vcpu * vcpu,unsigned int rt,unsigned int bytes,int is_default_endian)1271*4882a593Smuzhiyun int kvmppc_handle_load(struct kvm_vcpu *vcpu,
1272*4882a593Smuzhiyun unsigned int rt, unsigned int bytes,
1273*4882a593Smuzhiyun int is_default_endian)
1274*4882a593Smuzhiyun {
1275*4882a593Smuzhiyun return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
1276*4882a593Smuzhiyun }
1277*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(kvmppc_handle_load);
1278*4882a593Smuzhiyun
1279*4882a593Smuzhiyun /* Same as above, but sign extends */
kvmppc_handle_loads(struct kvm_vcpu * vcpu,unsigned int rt,unsigned int bytes,int is_default_endian)1280*4882a593Smuzhiyun int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
1281*4882a593Smuzhiyun unsigned int rt, unsigned int bytes,
1282*4882a593Smuzhiyun int is_default_endian)
1283*4882a593Smuzhiyun {
1284*4882a593Smuzhiyun return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
1285*4882a593Smuzhiyun }
1286*4882a593Smuzhiyun
1287*4882a593Smuzhiyun #ifdef CONFIG_VSX
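/*
 * A VSX load may need several element-sized MMIO accesses. mmio_vsx_copy_nums
 * counts the copies still to do; each iteration replays __kvmppc_handle_load()
 * and advances the guest physical address by the MMIO length. If one access
 * has to go out to userspace, the remaining copies are replayed later from
 * kvmppc_emulate_mmio_vsx_loadstore().
 */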
kvmppc_handle_vsx_load(struct kvm_vcpu * vcpu,unsigned int rt,unsigned int bytes,int is_default_endian,int mmio_sign_extend)1288*4882a593Smuzhiyun int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
1289*4882a593Smuzhiyun unsigned int rt, unsigned int bytes,
1290*4882a593Smuzhiyun int is_default_endian, int mmio_sign_extend)
1291*4882a593Smuzhiyun {
1292*4882a593Smuzhiyun enum emulation_result emulated = EMULATE_DONE;
1293*4882a593Smuzhiyun
1294*4882a593Smuzhiyun /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
1295*4882a593Smuzhiyun if (vcpu->arch.mmio_vsx_copy_nums > 4)
1296*4882a593Smuzhiyun return EMULATE_FAIL;
1297*4882a593Smuzhiyun
1298*4882a593Smuzhiyun while (vcpu->arch.mmio_vsx_copy_nums) {
1299*4882a593Smuzhiyun emulated = __kvmppc_handle_load(vcpu, rt, bytes,
1300*4882a593Smuzhiyun is_default_endian, mmio_sign_extend);
1301*4882a593Smuzhiyun
1302*4882a593Smuzhiyun if (emulated != EMULATE_DONE)
1303*4882a593Smuzhiyun break;
1304*4882a593Smuzhiyun
1305*4882a593Smuzhiyun vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1306*4882a593Smuzhiyun
1307*4882a593Smuzhiyun vcpu->arch.mmio_vsx_copy_nums--;
1308*4882a593Smuzhiyun vcpu->arch.mmio_vsx_offset++;
1309*4882a593Smuzhiyun }
1310*4882a593Smuzhiyun return emulated;
1311*4882a593Smuzhiyun }
1312*4882a593Smuzhiyun #endif /* CONFIG_VSX */
1313*4882a593Smuzhiyun
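/*
 * Set up a guest store to MMIO space: convert a double- to single-precision
 * value if mmio_sp64_extend is set, byte-swap the data when guest and host
 * endianness differ, and place it in the low bytes of run->mmio.data before
 * trying the in-kernel io bus. As for loads, EMULATE_DO_MMIO means the access
 * must be completed by userspace.
 */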
kvmppc_handle_store(struct kvm_vcpu * vcpu,u64 val,unsigned int bytes,int is_default_endian)1314*4882a593Smuzhiyun int kvmppc_handle_store(struct kvm_vcpu *vcpu,
1315*4882a593Smuzhiyun u64 val, unsigned int bytes, int is_default_endian)
1316*4882a593Smuzhiyun {
1317*4882a593Smuzhiyun struct kvm_run *run = vcpu->run;
1318*4882a593Smuzhiyun void *data = run->mmio.data;
1319*4882a593Smuzhiyun int idx, ret;
1320*4882a593Smuzhiyun bool host_swabbed;
1321*4882a593Smuzhiyun
1322*4882a593Smuzhiyun /* Pity C doesn't have a logical XOR operator */
1323*4882a593Smuzhiyun if (kvmppc_need_byteswap(vcpu)) {
1324*4882a593Smuzhiyun host_swabbed = is_default_endian;
1325*4882a593Smuzhiyun } else {
1326*4882a593Smuzhiyun host_swabbed = !is_default_endian;
1327*4882a593Smuzhiyun }
1328*4882a593Smuzhiyun
1329*4882a593Smuzhiyun if (bytes > sizeof(run->mmio.data)) {
1330*4882a593Smuzhiyun printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
1331*4882a593Smuzhiyun run->mmio.len);
1332*4882a593Smuzhiyun }
1333*4882a593Smuzhiyun
1334*4882a593Smuzhiyun run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1335*4882a593Smuzhiyun run->mmio.len = bytes;
1336*4882a593Smuzhiyun run->mmio.is_write = 1;
1337*4882a593Smuzhiyun vcpu->mmio_needed = 1;
1338*4882a593Smuzhiyun vcpu->mmio_is_write = 1;
1339*4882a593Smuzhiyun
1340*4882a593Smuzhiyun if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
1341*4882a593Smuzhiyun val = dp_to_sp(val);
1342*4882a593Smuzhiyun
1343*4882a593Smuzhiyun /* Store the value in the lowest bytes of 'data'. */
1344*4882a593Smuzhiyun if (!host_swabbed) {
1345*4882a593Smuzhiyun switch (bytes) {
1346*4882a593Smuzhiyun case 8: *(u64 *)data = val; break;
1347*4882a593Smuzhiyun case 4: *(u32 *)data = val; break;
1348*4882a593Smuzhiyun case 2: *(u16 *)data = val; break;
1349*4882a593Smuzhiyun case 1: *(u8 *)data = val; break;
1350*4882a593Smuzhiyun }
1351*4882a593Smuzhiyun } else {
1352*4882a593Smuzhiyun switch (bytes) {
1353*4882a593Smuzhiyun case 8: *(u64 *)data = swab64(val); break;
1354*4882a593Smuzhiyun case 4: *(u32 *)data = swab32(val); break;
1355*4882a593Smuzhiyun case 2: *(u16 *)data = swab16(val); break;
1356*4882a593Smuzhiyun case 1: *(u8 *)data = val; break;
1357*4882a593Smuzhiyun }
1358*4882a593Smuzhiyun }
1359*4882a593Smuzhiyun
1360*4882a593Smuzhiyun idx = srcu_read_lock(&vcpu->kvm->srcu);
1361*4882a593Smuzhiyun
1362*4882a593Smuzhiyun ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1363*4882a593Smuzhiyun bytes, &run->mmio.data);
1364*4882a593Smuzhiyun
1365*4882a593Smuzhiyun srcu_read_unlock(&vcpu->kvm->srcu, idx);
1366*4882a593Smuzhiyun
1367*4882a593Smuzhiyun if (!ret) {
1368*4882a593Smuzhiyun vcpu->mmio_needed = 0;
1369*4882a593Smuzhiyun return EMULATE_DONE;
1370*4882a593Smuzhiyun }
1371*4882a593Smuzhiyun
1372*4882a593Smuzhiyun return EMULATE_DO_MMIO;
1373*4882a593Smuzhiyun }
1374*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(kvmppc_handle_store);
1375*4882a593Smuzhiyun
1376*4882a593Smuzhiyun #ifdef CONFIG_VSX
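/*
 * Fetch the next element of a VSX store from the source register. The first
 * 32 VSRs overlay the FPRs (rs < 32), the rest overlay the VRs, and
 * mmio_vsx_offset selects which doubleword/word of the register is wanted.
 * Returns -1 if the offset or copy type is invalid.
 */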
kvmppc_get_vsr_data(struct kvm_vcpu * vcpu,int rs,u64 * val)1377*4882a593Smuzhiyun static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
1378*4882a593Smuzhiyun {
1379*4882a593Smuzhiyun u32 dword_offset, word_offset;
1380*4882a593Smuzhiyun union kvmppc_one_reg reg;
1381*4882a593Smuzhiyun int vsx_offset = 0;
1382*4882a593Smuzhiyun int copy_type = vcpu->arch.mmio_copy_type;
1383*4882a593Smuzhiyun int result = 0;
1384*4882a593Smuzhiyun
1385*4882a593Smuzhiyun switch (copy_type) {
1386*4882a593Smuzhiyun case KVMPPC_VSX_COPY_DWORD:
1387*4882a593Smuzhiyun vsx_offset =
1388*4882a593Smuzhiyun kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
1389*4882a593Smuzhiyun
1390*4882a593Smuzhiyun if (vsx_offset == -1) {
1391*4882a593Smuzhiyun result = -1;
1392*4882a593Smuzhiyun break;
1393*4882a593Smuzhiyun }
1394*4882a593Smuzhiyun
1395*4882a593Smuzhiyun if (rs < 32) {
1396*4882a593Smuzhiyun *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
1397*4882a593Smuzhiyun } else {
1398*4882a593Smuzhiyun reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
1399*4882a593Smuzhiyun *val = reg.vsxval[vsx_offset];
1400*4882a593Smuzhiyun }
1401*4882a593Smuzhiyun break;
1402*4882a593Smuzhiyun
1403*4882a593Smuzhiyun case KVMPPC_VSX_COPY_WORD:
1404*4882a593Smuzhiyun vsx_offset =
1405*4882a593Smuzhiyun kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
1406*4882a593Smuzhiyun
1407*4882a593Smuzhiyun if (vsx_offset == -1) {
1408*4882a593Smuzhiyun result = -1;
1409*4882a593Smuzhiyun break;
1410*4882a593Smuzhiyun }
1411*4882a593Smuzhiyun
1412*4882a593Smuzhiyun if (rs < 32) {
1413*4882a593Smuzhiyun dword_offset = vsx_offset / 2;
1414*4882a593Smuzhiyun word_offset = vsx_offset % 2;
1415*4882a593Smuzhiyun reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
1416*4882a593Smuzhiyun *val = reg.vsx32val[word_offset];
1417*4882a593Smuzhiyun } else {
1418*4882a593Smuzhiyun reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
1419*4882a593Smuzhiyun *val = reg.vsx32val[vsx_offset];
1420*4882a593Smuzhiyun }
1421*4882a593Smuzhiyun break;
1422*4882a593Smuzhiyun
1423*4882a593Smuzhiyun default:
1424*4882a593Smuzhiyun result = -1;
1425*4882a593Smuzhiyun break;
1426*4882a593Smuzhiyun }
1427*4882a593Smuzhiyun
1428*4882a593Smuzhiyun return result;
1429*4882a593Smuzhiyun }
1430*4882a593Smuzhiyun
kvmppc_handle_vsx_store(struct kvm_vcpu * vcpu,int rs,unsigned int bytes,int is_default_endian)1431*4882a593Smuzhiyun int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
1432*4882a593Smuzhiyun int rs, unsigned int bytes, int is_default_endian)
1433*4882a593Smuzhiyun {
1434*4882a593Smuzhiyun u64 val;
1435*4882a593Smuzhiyun enum emulation_result emulated = EMULATE_DONE;
1436*4882a593Smuzhiyun
1437*4882a593Smuzhiyun vcpu->arch.io_gpr = rs;
1438*4882a593Smuzhiyun
1439*4882a593Smuzhiyun /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
1440*4882a593Smuzhiyun if (vcpu->arch.mmio_vsx_copy_nums > 4)
1441*4882a593Smuzhiyun return EMULATE_FAIL;
1442*4882a593Smuzhiyun
1443*4882a593Smuzhiyun while (vcpu->arch.mmio_vsx_copy_nums) {
1444*4882a593Smuzhiyun if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
1445*4882a593Smuzhiyun return EMULATE_FAIL;
1446*4882a593Smuzhiyun
1447*4882a593Smuzhiyun emulated = kvmppc_handle_store(vcpu,
1448*4882a593Smuzhiyun val, bytes, is_default_endian);
1449*4882a593Smuzhiyun
1450*4882a593Smuzhiyun if (emulated != EMULATE_DONE)
1451*4882a593Smuzhiyun break;
1452*4882a593Smuzhiyun
1453*4882a593Smuzhiyun vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1454*4882a593Smuzhiyun
1455*4882a593Smuzhiyun vcpu->arch.mmio_vsx_copy_nums--;
1456*4882a593Smuzhiyun vcpu->arch.mmio_vsx_offset++;
1457*4882a593Smuzhiyun }
1458*4882a593Smuzhiyun
1459*4882a593Smuzhiyun return emulated;
1460*4882a593Smuzhiyun }
1461*4882a593Smuzhiyun
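/*
 * Continue a multi-copy VSX MMIO access after userspace has completed one
 * round trip: advance past the finished access and reissue the load or store
 * for the remaining copies. Called from kvm_arch_vcpu_ioctl_run().
 */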
kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu * vcpu)1462*4882a593Smuzhiyun static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
1463*4882a593Smuzhiyun {
1464*4882a593Smuzhiyun struct kvm_run *run = vcpu->run;
1465*4882a593Smuzhiyun enum emulation_result emulated = EMULATE_FAIL;
1466*4882a593Smuzhiyun int r;
1467*4882a593Smuzhiyun
1468*4882a593Smuzhiyun vcpu->arch.paddr_accessed += run->mmio.len;
1469*4882a593Smuzhiyun
1470*4882a593Smuzhiyun if (!vcpu->mmio_is_write) {
1471*4882a593Smuzhiyun emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
1472*4882a593Smuzhiyun run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
1473*4882a593Smuzhiyun } else {
1474*4882a593Smuzhiyun emulated = kvmppc_handle_vsx_store(vcpu,
1475*4882a593Smuzhiyun vcpu->arch.io_gpr, run->mmio.len, 1);
1476*4882a593Smuzhiyun }
1477*4882a593Smuzhiyun
1478*4882a593Smuzhiyun switch (emulated) {
1479*4882a593Smuzhiyun case EMULATE_DO_MMIO:
1480*4882a593Smuzhiyun run->exit_reason = KVM_EXIT_MMIO;
1481*4882a593Smuzhiyun r = RESUME_HOST;
1482*4882a593Smuzhiyun break;
1483*4882a593Smuzhiyun case EMULATE_FAIL:
1484*4882a593Smuzhiyun pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
1485*4882a593Smuzhiyun run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1486*4882a593Smuzhiyun run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1487*4882a593Smuzhiyun r = RESUME_HOST;
1488*4882a593Smuzhiyun break;
1489*4882a593Smuzhiyun default:
1490*4882a593Smuzhiyun r = RESUME_GUEST;
1491*4882a593Smuzhiyun break;
1492*4882a593Smuzhiyun }
1493*4882a593Smuzhiyun return r;
1494*4882a593Smuzhiyun }
1495*4882a593Smuzhiyun #endif /* CONFIG_VSX */
1496*4882a593Smuzhiyun
1497*4882a593Smuzhiyun #ifdef CONFIG_ALTIVEC
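/*
 * Altivec (VMX) loads are handled like VSX ones: mmio_vmx_copy_nums counts the
 * element-sized accesses still outstanding and each one is replayed through
 * __kvmppc_handle_load().
 */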
kvmppc_handle_vmx_load(struct kvm_vcpu * vcpu,unsigned int rt,unsigned int bytes,int is_default_endian)1498*4882a593Smuzhiyun int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
1499*4882a593Smuzhiyun unsigned int rt, unsigned int bytes, int is_default_endian)
1500*4882a593Smuzhiyun {
1501*4882a593Smuzhiyun enum emulation_result emulated = EMULATE_DONE;
1502*4882a593Smuzhiyun
1503*4882a593Smuzhiyun if (vcpu->arch.mmio_vmx_copy_nums > 2)
1504*4882a593Smuzhiyun return EMULATE_FAIL;
1505*4882a593Smuzhiyun
1506*4882a593Smuzhiyun while (vcpu->arch.mmio_vmx_copy_nums) {
1507*4882a593Smuzhiyun emulated = __kvmppc_handle_load(vcpu, rt, bytes,
1508*4882a593Smuzhiyun is_default_endian, 0);
1509*4882a593Smuzhiyun
1510*4882a593Smuzhiyun if (emulated != EMULATE_DONE)
1511*4882a593Smuzhiyun break;
1512*4882a593Smuzhiyun
1513*4882a593Smuzhiyun vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1514*4882a593Smuzhiyun vcpu->arch.mmio_vmx_copy_nums--;
1515*4882a593Smuzhiyun vcpu->arch.mmio_vmx_offset++;
1516*4882a593Smuzhiyun }
1517*4882a593Smuzhiyun
1518*4882a593Smuzhiyun return emulated;
1519*4882a593Smuzhiyun }
1520*4882a593Smuzhiyun
kvmppc_get_vmx_dword(struct kvm_vcpu * vcpu,int index,u64 * val)1521*4882a593Smuzhiyun static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
1522*4882a593Smuzhiyun {
1523*4882a593Smuzhiyun union kvmppc_one_reg reg;
1524*4882a593Smuzhiyun int vmx_offset = 0;
1525*4882a593Smuzhiyun int result = 0;
1526*4882a593Smuzhiyun
1527*4882a593Smuzhiyun vmx_offset =
1528*4882a593Smuzhiyun kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1529*4882a593Smuzhiyun
1530*4882a593Smuzhiyun if (vmx_offset == -1)
1531*4882a593Smuzhiyun return -1;
1532*4882a593Smuzhiyun
1533*4882a593Smuzhiyun reg.vval = VCPU_VSX_VR(vcpu, index);
1534*4882a593Smuzhiyun *val = reg.vsxval[vmx_offset];
1535*4882a593Smuzhiyun
1536*4882a593Smuzhiyun return result;
1537*4882a593Smuzhiyun }
1538*4882a593Smuzhiyun
kvmppc_get_vmx_word(struct kvm_vcpu * vcpu,int index,u64 * val)1539*4882a593Smuzhiyun static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
1540*4882a593Smuzhiyun {
1541*4882a593Smuzhiyun union kvmppc_one_reg reg;
1542*4882a593Smuzhiyun int vmx_offset = 0;
1543*4882a593Smuzhiyun int result = 0;
1544*4882a593Smuzhiyun
1545*4882a593Smuzhiyun vmx_offset =
1546*4882a593Smuzhiyun kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1547*4882a593Smuzhiyun
1548*4882a593Smuzhiyun if (vmx_offset == -1)
1549*4882a593Smuzhiyun return -1;
1550*4882a593Smuzhiyun
1551*4882a593Smuzhiyun reg.vval = VCPU_VSX_VR(vcpu, index);
1552*4882a593Smuzhiyun *val = reg.vsx32val[vmx_offset];
1553*4882a593Smuzhiyun
1554*4882a593Smuzhiyun return result;
1555*4882a593Smuzhiyun }
1556*4882a593Smuzhiyun
kvmppc_get_vmx_hword(struct kvm_vcpu * vcpu,int index,u64 * val)1557*4882a593Smuzhiyun static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
1558*4882a593Smuzhiyun {
1559*4882a593Smuzhiyun union kvmppc_one_reg reg;
1560*4882a593Smuzhiyun int vmx_offset = 0;
1561*4882a593Smuzhiyun int result = 0;
1562*4882a593Smuzhiyun
1563*4882a593Smuzhiyun vmx_offset =
1564*4882a593Smuzhiyun kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1565*4882a593Smuzhiyun
1566*4882a593Smuzhiyun if (vmx_offset == -1)
1567*4882a593Smuzhiyun return -1;
1568*4882a593Smuzhiyun
1569*4882a593Smuzhiyun reg.vval = VCPU_VSX_VR(vcpu, index);
1570*4882a593Smuzhiyun *val = reg.vsx16val[vmx_offset];
1571*4882a593Smuzhiyun
1572*4882a593Smuzhiyun return result;
1573*4882a593Smuzhiyun }
1574*4882a593Smuzhiyun
kvmppc_get_vmx_byte(struct kvm_vcpu * vcpu,int index,u64 * val)1575*4882a593Smuzhiyun static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
1576*4882a593Smuzhiyun {
1577*4882a593Smuzhiyun union kvmppc_one_reg reg;
1578*4882a593Smuzhiyun int vmx_offset = 0;
1579*4882a593Smuzhiyun int result = 0;
1580*4882a593Smuzhiyun
1581*4882a593Smuzhiyun vmx_offset =
1582*4882a593Smuzhiyun kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1583*4882a593Smuzhiyun
1584*4882a593Smuzhiyun if (vmx_offset == -1)
1585*4882a593Smuzhiyun return -1;
1586*4882a593Smuzhiyun
1587*4882a593Smuzhiyun reg.vval = VCPU_VSX_VR(vcpu, index);
1588*4882a593Smuzhiyun *val = reg.vsx8val[vmx_offset];
1589*4882a593Smuzhiyun
1590*4882a593Smuzhiyun return result;
1591*4882a593Smuzhiyun }
1592*4882a593Smuzhiyun
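/*
 * For a VMX store, pull the next dword/word/halfword/byte out of the source
 * vector register according to mmio_copy_type and mmio_vmx_offset, then emit
 * it with kvmppc_handle_store(), one element per iteration.
 */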
kvmppc_handle_vmx_store(struct kvm_vcpu * vcpu,unsigned int rs,unsigned int bytes,int is_default_endian)1593*4882a593Smuzhiyun int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
1594*4882a593Smuzhiyun unsigned int rs, unsigned int bytes, int is_default_endian)
1595*4882a593Smuzhiyun {
1596*4882a593Smuzhiyun u64 val = 0;
1597*4882a593Smuzhiyun unsigned int index = rs & KVM_MMIO_REG_MASK;
1598*4882a593Smuzhiyun enum emulation_result emulated = EMULATE_DONE;
1599*4882a593Smuzhiyun
1600*4882a593Smuzhiyun if (vcpu->arch.mmio_vmx_copy_nums > 2)
1601*4882a593Smuzhiyun return EMULATE_FAIL;
1602*4882a593Smuzhiyun
1603*4882a593Smuzhiyun vcpu->arch.io_gpr = rs;
1604*4882a593Smuzhiyun
1605*4882a593Smuzhiyun while (vcpu->arch.mmio_vmx_copy_nums) {
1606*4882a593Smuzhiyun switch (vcpu->arch.mmio_copy_type) {
1607*4882a593Smuzhiyun case KVMPPC_VMX_COPY_DWORD:
1608*4882a593Smuzhiyun if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
1609*4882a593Smuzhiyun return EMULATE_FAIL;
1610*4882a593Smuzhiyun
1611*4882a593Smuzhiyun break;
1612*4882a593Smuzhiyun case KVMPPC_VMX_COPY_WORD:
1613*4882a593Smuzhiyun if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
1614*4882a593Smuzhiyun return EMULATE_FAIL;
1615*4882a593Smuzhiyun break;
1616*4882a593Smuzhiyun case KVMPPC_VMX_COPY_HWORD:
1617*4882a593Smuzhiyun if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
1618*4882a593Smuzhiyun return EMULATE_FAIL;
1619*4882a593Smuzhiyun break;
1620*4882a593Smuzhiyun case KVMPPC_VMX_COPY_BYTE:
1621*4882a593Smuzhiyun if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
1622*4882a593Smuzhiyun return EMULATE_FAIL;
1623*4882a593Smuzhiyun break;
1624*4882a593Smuzhiyun default:
1625*4882a593Smuzhiyun return EMULATE_FAIL;
1626*4882a593Smuzhiyun }
1627*4882a593Smuzhiyun
1628*4882a593Smuzhiyun emulated = kvmppc_handle_store(vcpu, val, bytes,
1629*4882a593Smuzhiyun is_default_endian);
1630*4882a593Smuzhiyun if (emulated != EMULATE_DONE)
1631*4882a593Smuzhiyun break;
1632*4882a593Smuzhiyun
1633*4882a593Smuzhiyun vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1634*4882a593Smuzhiyun vcpu->arch.mmio_vmx_copy_nums--;
1635*4882a593Smuzhiyun vcpu->arch.mmio_vmx_offset++;
1636*4882a593Smuzhiyun }
1637*4882a593Smuzhiyun
1638*4882a593Smuzhiyun return emulated;
1639*4882a593Smuzhiyun }
1640*4882a593Smuzhiyun
kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu * vcpu)1641*4882a593Smuzhiyun static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
1642*4882a593Smuzhiyun {
1643*4882a593Smuzhiyun struct kvm_run *run = vcpu->run;
1644*4882a593Smuzhiyun enum emulation_result emulated = EMULATE_FAIL;
1645*4882a593Smuzhiyun int r;
1646*4882a593Smuzhiyun
1647*4882a593Smuzhiyun vcpu->arch.paddr_accessed += run->mmio.len;
1648*4882a593Smuzhiyun
1649*4882a593Smuzhiyun if (!vcpu->mmio_is_write) {
1650*4882a593Smuzhiyun emulated = kvmppc_handle_vmx_load(vcpu,
1651*4882a593Smuzhiyun vcpu->arch.io_gpr, run->mmio.len, 1);
1652*4882a593Smuzhiyun } else {
1653*4882a593Smuzhiyun emulated = kvmppc_handle_vmx_store(vcpu,
1654*4882a593Smuzhiyun vcpu->arch.io_gpr, run->mmio.len, 1);
1655*4882a593Smuzhiyun }
1656*4882a593Smuzhiyun
1657*4882a593Smuzhiyun switch (emulated) {
1658*4882a593Smuzhiyun case EMULATE_DO_MMIO:
1659*4882a593Smuzhiyun run->exit_reason = KVM_EXIT_MMIO;
1660*4882a593Smuzhiyun r = RESUME_HOST;
1661*4882a593Smuzhiyun break;
1662*4882a593Smuzhiyun case EMULATE_FAIL:
1663*4882a593Smuzhiyun pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
1664*4882a593Smuzhiyun run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1665*4882a593Smuzhiyun run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1666*4882a593Smuzhiyun r = RESUME_HOST;
1667*4882a593Smuzhiyun break;
1668*4882a593Smuzhiyun default:
1669*4882a593Smuzhiyun r = RESUME_GUEST;
1670*4882a593Smuzhiyun break;
1671*4882a593Smuzhiyun }
1672*4882a593Smuzhiyun return r;
1673*4882a593Smuzhiyun }
1674*4882a593Smuzhiyun #endif /* CONFIG_ALTIVEC */
1675*4882a593Smuzhiyun
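/*
 * Generic ONE_REG accessors: defer to the backend first via
 * kvmppc_get_one_reg()/kvmppc_set_one_reg(); if the backend does not know the
 * register (-EINVAL), handle the common Altivec registers here before copying
 * the value to or from userspace.
 */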
kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu * vcpu,struct kvm_one_reg * reg)1676*4882a593Smuzhiyun int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1677*4882a593Smuzhiyun {
1678*4882a593Smuzhiyun int r = 0;
1679*4882a593Smuzhiyun union kvmppc_one_reg val;
1680*4882a593Smuzhiyun int size;
1681*4882a593Smuzhiyun
1682*4882a593Smuzhiyun size = one_reg_size(reg->id);
1683*4882a593Smuzhiyun if (size > sizeof(val))
1684*4882a593Smuzhiyun return -EINVAL;
1685*4882a593Smuzhiyun
1686*4882a593Smuzhiyun r = kvmppc_get_one_reg(vcpu, reg->id, &val);
1687*4882a593Smuzhiyun if (r == -EINVAL) {
1688*4882a593Smuzhiyun r = 0;
1689*4882a593Smuzhiyun switch (reg->id) {
1690*4882a593Smuzhiyun #ifdef CONFIG_ALTIVEC
1691*4882a593Smuzhiyun case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1692*4882a593Smuzhiyun if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1693*4882a593Smuzhiyun r = -ENXIO;
1694*4882a593Smuzhiyun break;
1695*4882a593Smuzhiyun }
1696*4882a593Smuzhiyun val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
1697*4882a593Smuzhiyun break;
1698*4882a593Smuzhiyun case KVM_REG_PPC_VSCR:
1699*4882a593Smuzhiyun if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1700*4882a593Smuzhiyun r = -ENXIO;
1701*4882a593Smuzhiyun break;
1702*4882a593Smuzhiyun }
1703*4882a593Smuzhiyun val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
1704*4882a593Smuzhiyun break;
1705*4882a593Smuzhiyun case KVM_REG_PPC_VRSAVE:
1706*4882a593Smuzhiyun val = get_reg_val(reg->id, vcpu->arch.vrsave);
1707*4882a593Smuzhiyun break;
1708*4882a593Smuzhiyun #endif /* CONFIG_ALTIVEC */
1709*4882a593Smuzhiyun default:
1710*4882a593Smuzhiyun r = -EINVAL;
1711*4882a593Smuzhiyun break;
1712*4882a593Smuzhiyun }
1713*4882a593Smuzhiyun }
1714*4882a593Smuzhiyun
1715*4882a593Smuzhiyun if (r)
1716*4882a593Smuzhiyun return r;
1717*4882a593Smuzhiyun
1718*4882a593Smuzhiyun if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
1719*4882a593Smuzhiyun r = -EFAULT;
1720*4882a593Smuzhiyun
1721*4882a593Smuzhiyun return r;
1722*4882a593Smuzhiyun }
1723*4882a593Smuzhiyun
kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu * vcpu,struct kvm_one_reg * reg)1724*4882a593Smuzhiyun int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1725*4882a593Smuzhiyun {
1726*4882a593Smuzhiyun int r;
1727*4882a593Smuzhiyun union kvmppc_one_reg val;
1728*4882a593Smuzhiyun int size;
1729*4882a593Smuzhiyun
1730*4882a593Smuzhiyun size = one_reg_size(reg->id);
1731*4882a593Smuzhiyun if (size > sizeof(val))
1732*4882a593Smuzhiyun return -EINVAL;
1733*4882a593Smuzhiyun
1734*4882a593Smuzhiyun if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
1735*4882a593Smuzhiyun return -EFAULT;
1736*4882a593Smuzhiyun
1737*4882a593Smuzhiyun r = kvmppc_set_one_reg(vcpu, reg->id, &val);
1738*4882a593Smuzhiyun if (r == -EINVAL) {
1739*4882a593Smuzhiyun r = 0;
1740*4882a593Smuzhiyun switch (reg->id) {
1741*4882a593Smuzhiyun #ifdef CONFIG_ALTIVEC
1742*4882a593Smuzhiyun case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1743*4882a593Smuzhiyun if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1744*4882a593Smuzhiyun r = -ENXIO;
1745*4882a593Smuzhiyun break;
1746*4882a593Smuzhiyun }
1747*4882a593Smuzhiyun vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
1748*4882a593Smuzhiyun break;
1749*4882a593Smuzhiyun case KVM_REG_PPC_VSCR:
1750*4882a593Smuzhiyun if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1751*4882a593Smuzhiyun r = -ENXIO;
1752*4882a593Smuzhiyun break;
1753*4882a593Smuzhiyun }
1754*4882a593Smuzhiyun vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
1755*4882a593Smuzhiyun break;
1756*4882a593Smuzhiyun case KVM_REG_PPC_VRSAVE:
1757*4882a593Smuzhiyun if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1758*4882a593Smuzhiyun r = -ENXIO;
1759*4882a593Smuzhiyun break;
1760*4882a593Smuzhiyun }
1761*4882a593Smuzhiyun vcpu->arch.vrsave = set_reg_val(reg->id, val);
1762*4882a593Smuzhiyun break;
1763*4882a593Smuzhiyun #endif /* CONFIG_ALTIVEC */
1764*4882a593Smuzhiyun default:
1765*4882a593Smuzhiyun r = -EINVAL;
1766*4882a593Smuzhiyun break;
1767*4882a593Smuzhiyun }
1768*4882a593Smuzhiyun }
1769*4882a593Smuzhiyun
1770*4882a593Smuzhiyun return r;
1771*4882a593Smuzhiyun }
1772*4882a593Smuzhiyun
kvm_arch_vcpu_ioctl_run(struct kvm_vcpu * vcpu)1773*4882a593Smuzhiyun int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1774*4882a593Smuzhiyun {
1775*4882a593Smuzhiyun struct kvm_run *run = vcpu->run;
1776*4882a593Smuzhiyun int r;
1777*4882a593Smuzhiyun
1778*4882a593Smuzhiyun vcpu_load(vcpu);
1779*4882a593Smuzhiyun
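/*
 * If we are coming back from userspace after an MMIO exit, first complete
 * the pending access (deliver the loaded value, replay any remaining
 * VSX/VMX copies); a replay may itself require another exit to userspace.
 */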
1780*4882a593Smuzhiyun if (vcpu->mmio_needed) {
1781*4882a593Smuzhiyun vcpu->mmio_needed = 0;
1782*4882a593Smuzhiyun if (!vcpu->mmio_is_write)
1783*4882a593Smuzhiyun kvmppc_complete_mmio_load(vcpu);
1784*4882a593Smuzhiyun #ifdef CONFIG_VSX
1785*4882a593Smuzhiyun if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1786*4882a593Smuzhiyun vcpu->arch.mmio_vsx_copy_nums--;
1787*4882a593Smuzhiyun vcpu->arch.mmio_vsx_offset++;
1788*4882a593Smuzhiyun }
1789*4882a593Smuzhiyun
1790*4882a593Smuzhiyun if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1791*4882a593Smuzhiyun r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
1792*4882a593Smuzhiyun if (r == RESUME_HOST) {
1793*4882a593Smuzhiyun vcpu->mmio_needed = 1;
1794*4882a593Smuzhiyun goto out;
1795*4882a593Smuzhiyun }
1796*4882a593Smuzhiyun }
1797*4882a593Smuzhiyun #endif
1798*4882a593Smuzhiyun #ifdef CONFIG_ALTIVEC
1799*4882a593Smuzhiyun if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1800*4882a593Smuzhiyun vcpu->arch.mmio_vmx_copy_nums--;
1801*4882a593Smuzhiyun vcpu->arch.mmio_vmx_offset++;
1802*4882a593Smuzhiyun }
1803*4882a593Smuzhiyun
1804*4882a593Smuzhiyun if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1805*4882a593Smuzhiyun r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
1806*4882a593Smuzhiyun if (r == RESUME_HOST) {
1807*4882a593Smuzhiyun vcpu->mmio_needed = 1;
1808*4882a593Smuzhiyun goto out;
1809*4882a593Smuzhiyun }
1810*4882a593Smuzhiyun }
1811*4882a593Smuzhiyun #endif
1812*4882a593Smuzhiyun } else if (vcpu->arch.osi_needed) {
1813*4882a593Smuzhiyun u64 *gprs = run->osi.gprs;
1814*4882a593Smuzhiyun int i;
1815*4882a593Smuzhiyun
1816*4882a593Smuzhiyun for (i = 0; i < 32; i++)
1817*4882a593Smuzhiyun kvmppc_set_gpr(vcpu, i, gprs[i]);
1818*4882a593Smuzhiyun vcpu->arch.osi_needed = 0;
1819*4882a593Smuzhiyun } else if (vcpu->arch.hcall_needed) {
1820*4882a593Smuzhiyun int i;
1821*4882a593Smuzhiyun
1822*4882a593Smuzhiyun kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
1823*4882a593Smuzhiyun for (i = 0; i < 9; ++i)
1824*4882a593Smuzhiyun kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
1825*4882a593Smuzhiyun vcpu->arch.hcall_needed = 0;
1826*4882a593Smuzhiyun #ifdef CONFIG_BOOKE
1827*4882a593Smuzhiyun } else if (vcpu->arch.epr_needed) {
1828*4882a593Smuzhiyun kvmppc_set_epr(vcpu, run->epr.epr);
1829*4882a593Smuzhiyun vcpu->arch.epr_needed = 0;
1830*4882a593Smuzhiyun #endif
1831*4882a593Smuzhiyun }
1832*4882a593Smuzhiyun
1833*4882a593Smuzhiyun kvm_sigset_activate(vcpu);
1834*4882a593Smuzhiyun
1835*4882a593Smuzhiyun if (run->immediate_exit)
1836*4882a593Smuzhiyun r = -EINTR;
1837*4882a593Smuzhiyun else
1838*4882a593Smuzhiyun r = kvmppc_vcpu_run(vcpu);
1839*4882a593Smuzhiyun
1840*4882a593Smuzhiyun kvm_sigset_deactivate(vcpu);
1841*4882a593Smuzhiyun
1842*4882a593Smuzhiyun #ifdef CONFIG_ALTIVEC
1843*4882a593Smuzhiyun out:
1844*4882a593Smuzhiyun #endif
1845*4882a593Smuzhiyun vcpu_put(vcpu);
1846*4882a593Smuzhiyun return r;
1847*4882a593Smuzhiyun }
1848*4882a593Smuzhiyun
kvm_vcpu_ioctl_interrupt(struct kvm_vcpu * vcpu,struct kvm_interrupt * irq)1849*4882a593Smuzhiyun int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1850*4882a593Smuzhiyun {
1851*4882a593Smuzhiyun if (irq->irq == KVM_INTERRUPT_UNSET) {
1852*4882a593Smuzhiyun kvmppc_core_dequeue_external(vcpu);
1853*4882a593Smuzhiyun return 0;
1854*4882a593Smuzhiyun }
1855*4882a593Smuzhiyun
1856*4882a593Smuzhiyun kvmppc_core_queue_external(vcpu, irq);
1857*4882a593Smuzhiyun
1858*4882a593Smuzhiyun kvm_vcpu_kick(vcpu);
1859*4882a593Smuzhiyun
1860*4882a593Smuzhiyun return 0;
1861*4882a593Smuzhiyun }
1862*4882a593Smuzhiyun
kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu * vcpu,struct kvm_enable_cap * cap)1863*4882a593Smuzhiyun static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1864*4882a593Smuzhiyun struct kvm_enable_cap *cap)
1865*4882a593Smuzhiyun {
1866*4882a593Smuzhiyun int r;
1867*4882a593Smuzhiyun
1868*4882a593Smuzhiyun if (cap->flags)
1869*4882a593Smuzhiyun return -EINVAL;
1870*4882a593Smuzhiyun
1871*4882a593Smuzhiyun switch (cap->cap) {
1872*4882a593Smuzhiyun case KVM_CAP_PPC_OSI:
1873*4882a593Smuzhiyun r = 0;
1874*4882a593Smuzhiyun vcpu->arch.osi_enabled = true;
1875*4882a593Smuzhiyun break;
1876*4882a593Smuzhiyun case KVM_CAP_PPC_PAPR:
1877*4882a593Smuzhiyun r = 0;
1878*4882a593Smuzhiyun vcpu->arch.papr_enabled = true;
1879*4882a593Smuzhiyun break;
1880*4882a593Smuzhiyun case KVM_CAP_PPC_EPR:
1881*4882a593Smuzhiyun r = 0;
1882*4882a593Smuzhiyun if (cap->args[0])
1883*4882a593Smuzhiyun vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
1884*4882a593Smuzhiyun else
1885*4882a593Smuzhiyun vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
1886*4882a593Smuzhiyun break;
1887*4882a593Smuzhiyun #ifdef CONFIG_BOOKE
1888*4882a593Smuzhiyun case KVM_CAP_PPC_BOOKE_WATCHDOG:
1889*4882a593Smuzhiyun r = 0;
1890*4882a593Smuzhiyun vcpu->arch.watchdog_enabled = true;
1891*4882a593Smuzhiyun break;
1892*4882a593Smuzhiyun #endif
1893*4882a593Smuzhiyun #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
1894*4882a593Smuzhiyun case KVM_CAP_SW_TLB: {
1895*4882a593Smuzhiyun struct kvm_config_tlb cfg;
1896*4882a593Smuzhiyun void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
1897*4882a593Smuzhiyun
1898*4882a593Smuzhiyun r = -EFAULT;
1899*4882a593Smuzhiyun if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
1900*4882a593Smuzhiyun break;
1901*4882a593Smuzhiyun
1902*4882a593Smuzhiyun r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
1903*4882a593Smuzhiyun break;
1904*4882a593Smuzhiyun }
1905*4882a593Smuzhiyun #endif
1906*4882a593Smuzhiyun #ifdef CONFIG_KVM_MPIC
1907*4882a593Smuzhiyun case KVM_CAP_IRQ_MPIC: {
1908*4882a593Smuzhiyun struct fd f;
1909*4882a593Smuzhiyun struct kvm_device *dev;
1910*4882a593Smuzhiyun
1911*4882a593Smuzhiyun r = -EBADF;
1912*4882a593Smuzhiyun f = fdget(cap->args[0]);
1913*4882a593Smuzhiyun if (!f.file)
1914*4882a593Smuzhiyun break;
1915*4882a593Smuzhiyun
1916*4882a593Smuzhiyun r = -EPERM;
1917*4882a593Smuzhiyun dev = kvm_device_from_filp(f.file);
1918*4882a593Smuzhiyun if (dev)
1919*4882a593Smuzhiyun r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
1920*4882a593Smuzhiyun
1921*4882a593Smuzhiyun fdput(f);
1922*4882a593Smuzhiyun break;
1923*4882a593Smuzhiyun }
1924*4882a593Smuzhiyun #endif
1925*4882a593Smuzhiyun #ifdef CONFIG_KVM_XICS
1926*4882a593Smuzhiyun case KVM_CAP_IRQ_XICS: {
1927*4882a593Smuzhiyun struct fd f;
1928*4882a593Smuzhiyun struct kvm_device *dev;
1929*4882a593Smuzhiyun
1930*4882a593Smuzhiyun r = -EBADF;
1931*4882a593Smuzhiyun f = fdget(cap->args[0]);
1932*4882a593Smuzhiyun if (!f.file)
1933*4882a593Smuzhiyun break;
1934*4882a593Smuzhiyun
1935*4882a593Smuzhiyun r = -EPERM;
1936*4882a593Smuzhiyun dev = kvm_device_from_filp(f.file);
1937*4882a593Smuzhiyun if (dev) {
1938*4882a593Smuzhiyun if (xics_on_xive())
1939*4882a593Smuzhiyun r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
1940*4882a593Smuzhiyun else
1941*4882a593Smuzhiyun r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
1942*4882a593Smuzhiyun }
1943*4882a593Smuzhiyun
1944*4882a593Smuzhiyun fdput(f);
1945*4882a593Smuzhiyun break;
1946*4882a593Smuzhiyun }
1947*4882a593Smuzhiyun #endif /* CONFIG_KVM_XICS */
1948*4882a593Smuzhiyun #ifdef CONFIG_KVM_XIVE
1949*4882a593Smuzhiyun case KVM_CAP_PPC_IRQ_XIVE: {
1950*4882a593Smuzhiyun struct fd f;
1951*4882a593Smuzhiyun struct kvm_device *dev;
1952*4882a593Smuzhiyun
1953*4882a593Smuzhiyun r = -EBADF;
1954*4882a593Smuzhiyun f = fdget(cap->args[0]);
1955*4882a593Smuzhiyun if (!f.file)
1956*4882a593Smuzhiyun break;
1957*4882a593Smuzhiyun
1958*4882a593Smuzhiyun r = -ENXIO;
1959*4882a593Smuzhiyun if (!xive_enabled())
1960*4882a593Smuzhiyun break;
1961*4882a593Smuzhiyun
1962*4882a593Smuzhiyun r = -EPERM;
1963*4882a593Smuzhiyun dev = kvm_device_from_filp(f.file);
1964*4882a593Smuzhiyun if (dev)
1965*4882a593Smuzhiyun r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
1966*4882a593Smuzhiyun cap->args[1]);
1967*4882a593Smuzhiyun
1968*4882a593Smuzhiyun fdput(f);
1969*4882a593Smuzhiyun break;
1970*4882a593Smuzhiyun }
1971*4882a593Smuzhiyun #endif /* CONFIG_KVM_XIVE */
1972*4882a593Smuzhiyun #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1973*4882a593Smuzhiyun case KVM_CAP_PPC_FWNMI:
1974*4882a593Smuzhiyun r = -EINVAL;
1975*4882a593Smuzhiyun if (!is_kvmppc_hv_enabled(vcpu->kvm))
1976*4882a593Smuzhiyun break;
1977*4882a593Smuzhiyun r = 0;
1978*4882a593Smuzhiyun vcpu->kvm->arch.fwnmi_enabled = true;
1979*4882a593Smuzhiyun break;
1980*4882a593Smuzhiyun #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
1981*4882a593Smuzhiyun default:
1982*4882a593Smuzhiyun r = -EINVAL;
1983*4882a593Smuzhiyun break;
1984*4882a593Smuzhiyun }
1985*4882a593Smuzhiyun
1986*4882a593Smuzhiyun if (!r)
1987*4882a593Smuzhiyun r = kvmppc_sanity_check(vcpu);
1988*4882a593Smuzhiyun
1989*4882a593Smuzhiyun return r;
1990*4882a593Smuzhiyun }
1991*4882a593Smuzhiyun
kvm_arch_intc_initialized(struct kvm * kvm)1992*4882a593Smuzhiyun bool kvm_arch_intc_initialized(struct kvm *kvm)
1993*4882a593Smuzhiyun {
1994*4882a593Smuzhiyun #ifdef CONFIG_KVM_MPIC
1995*4882a593Smuzhiyun if (kvm->arch.mpic)
1996*4882a593Smuzhiyun return true;
1997*4882a593Smuzhiyun #endif
1998*4882a593Smuzhiyun #ifdef CONFIG_KVM_XICS
1999*4882a593Smuzhiyun if (kvm->arch.xics || kvm->arch.xive)
2000*4882a593Smuzhiyun return true;
2001*4882a593Smuzhiyun #endif
2002*4882a593Smuzhiyun return false;
2003*4882a593Smuzhiyun }
2004*4882a593Smuzhiyun
kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu * vcpu,struct kvm_mp_state * mp_state)2005*4882a593Smuzhiyun int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2006*4882a593Smuzhiyun struct kvm_mp_state *mp_state)
2007*4882a593Smuzhiyun {
2008*4882a593Smuzhiyun return -EINVAL;
2009*4882a593Smuzhiyun }
2010*4882a593Smuzhiyun
kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu * vcpu,struct kvm_mp_state * mp_state)2011*4882a593Smuzhiyun int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2012*4882a593Smuzhiyun struct kvm_mp_state *mp_state)
2013*4882a593Smuzhiyun {
2014*4882a593Smuzhiyun return -EINVAL;
2015*4882a593Smuzhiyun }
2016*4882a593Smuzhiyun
kvm_arch_vcpu_async_ioctl(struct file * filp,unsigned int ioctl,unsigned long arg)2017*4882a593Smuzhiyun long kvm_arch_vcpu_async_ioctl(struct file *filp,
2018*4882a593Smuzhiyun unsigned int ioctl, unsigned long arg)
2019*4882a593Smuzhiyun {
2020*4882a593Smuzhiyun struct kvm_vcpu *vcpu = filp->private_data;
2021*4882a593Smuzhiyun void __user *argp = (void __user *)arg;
2022*4882a593Smuzhiyun
2023*4882a593Smuzhiyun if (ioctl == KVM_INTERRUPT) {
2024*4882a593Smuzhiyun struct kvm_interrupt irq;
2025*4882a593Smuzhiyun if (copy_from_user(&irq, argp, sizeof(irq)))
2026*4882a593Smuzhiyun return -EFAULT;
2027*4882a593Smuzhiyun return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2028*4882a593Smuzhiyun }
2029*4882a593Smuzhiyun return -ENOIOCTLCMD;
2030*4882a593Smuzhiyun }
2031*4882a593Smuzhiyun
kvm_arch_vcpu_ioctl(struct file * filp,unsigned int ioctl,unsigned long arg)2032*4882a593Smuzhiyun long kvm_arch_vcpu_ioctl(struct file *filp,
2033*4882a593Smuzhiyun unsigned int ioctl, unsigned long arg)
2034*4882a593Smuzhiyun {
2035*4882a593Smuzhiyun struct kvm_vcpu *vcpu = filp->private_data;
2036*4882a593Smuzhiyun void __user *argp = (void __user *)arg;
2037*4882a593Smuzhiyun long r;
2038*4882a593Smuzhiyun
2039*4882a593Smuzhiyun switch (ioctl) {
2040*4882a593Smuzhiyun case KVM_ENABLE_CAP:
2041*4882a593Smuzhiyun {
2042*4882a593Smuzhiyun struct kvm_enable_cap cap;
2043*4882a593Smuzhiyun r = -EFAULT;
2044*4882a593Smuzhiyun if (copy_from_user(&cap, argp, sizeof(cap)))
2045*4882a593Smuzhiyun goto out;
2046*4882a593Smuzhiyun vcpu_load(vcpu);
2047*4882a593Smuzhiyun r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2048*4882a593Smuzhiyun vcpu_put(vcpu);
2049*4882a593Smuzhiyun break;
2050*4882a593Smuzhiyun }
2051*4882a593Smuzhiyun
2052*4882a593Smuzhiyun case KVM_SET_ONE_REG:
2053*4882a593Smuzhiyun case KVM_GET_ONE_REG:
2054*4882a593Smuzhiyun {
2055*4882a593Smuzhiyun struct kvm_one_reg reg;
2056*4882a593Smuzhiyun r = -EFAULT;
2057*4882a593Smuzhiyun if (copy_from_user(&reg, argp, sizeof(reg)))
2058*4882a593Smuzhiyun goto out;
2059*4882a593Smuzhiyun if (ioctl == KVM_SET_ONE_REG)
2060*4882a593Smuzhiyun r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
2061*4882a593Smuzhiyun else
2062*4882a593Smuzhiyun r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
2063*4882a593Smuzhiyun break;
2064*4882a593Smuzhiyun }
2065*4882a593Smuzhiyun
2066*4882a593Smuzhiyun #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
2067*4882a593Smuzhiyun case KVM_DIRTY_TLB: {
2068*4882a593Smuzhiyun struct kvm_dirty_tlb dirty;
2069*4882a593Smuzhiyun r = -EFAULT;
2070*4882a593Smuzhiyun if (copy_from_user(&dirty, argp, sizeof(dirty)))
2071*4882a593Smuzhiyun goto out;
2072*4882a593Smuzhiyun vcpu_load(vcpu);
2073*4882a593Smuzhiyun r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
2074*4882a593Smuzhiyun vcpu_put(vcpu);
2075*4882a593Smuzhiyun break;
2076*4882a593Smuzhiyun }
2077*4882a593Smuzhiyun #endif
2078*4882a593Smuzhiyun default:
2079*4882a593Smuzhiyun r = -EINVAL;
2080*4882a593Smuzhiyun }
2081*4882a593Smuzhiyun
2082*4882a593Smuzhiyun out:
2083*4882a593Smuzhiyun return r;
2084*4882a593Smuzhiyun }
2085*4882a593Smuzhiyun
kvm_arch_vcpu_fault(struct kvm_vcpu * vcpu,struct vm_fault * vmf)2086*4882a593Smuzhiyun vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2087*4882a593Smuzhiyun {
2088*4882a593Smuzhiyun return VM_FAULT_SIGBUS;
2089*4882a593Smuzhiyun }
2090*4882a593Smuzhiyun
kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo * pvinfo)2091*4882a593Smuzhiyun static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
2092*4882a593Smuzhiyun {
2093*4882a593Smuzhiyun u32 inst_nop = 0x60000000;
2094*4882a593Smuzhiyun #ifdef CONFIG_KVM_BOOKE_HV
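/*
 * On Book E processors with the hypervisor extensions, "sc 1" traps straight
 * to the hypervisor, so no magic register value is needed.
 */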
2095*4882a593Smuzhiyun u32 inst_sc1 = 0x44000022;
2096*4882a593Smuzhiyun pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
2097*4882a593Smuzhiyun pvinfo->hcall[1] = cpu_to_be32(inst_nop);
2098*4882a593Smuzhiyun pvinfo->hcall[2] = cpu_to_be32(inst_nop);
2099*4882a593Smuzhiyun pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2100*4882a593Smuzhiyun #else
2101*4882a593Smuzhiyun u32 inst_lis = 0x3c000000;
2102*4882a593Smuzhiyun u32 inst_ori = 0x60000000;
2103*4882a593Smuzhiyun u32 inst_sc = 0x44000002;
2104*4882a593Smuzhiyun u32 inst_imm_mask = 0xffff;
2105*4882a593Smuzhiyun
2106*4882a593Smuzhiyun /*
2107*4882a593Smuzhiyun * The hypercall to get into KVM from within guest context is as
2108*4882a593Smuzhiyun * follows:
2109*4882a593Smuzhiyun *
2110*4882a593Smuzhiyun * lis r0, KVM_SC_MAGIC_R0@h
2111*4882a593Smuzhiyun * ori r0, r0, KVM_SC_MAGIC_R0@l
2112*4882a593Smuzhiyun * sc
2113*4882a593Smuzhiyun * nop
2114*4882a593Smuzhiyun */
2115*4882a593Smuzhiyun pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
2116*4882a593Smuzhiyun pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
2117*4882a593Smuzhiyun pvinfo->hcall[2] = cpu_to_be32(inst_sc);
2118*4882a593Smuzhiyun pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2119*4882a593Smuzhiyun #endif
2120*4882a593Smuzhiyun
2121*4882a593Smuzhiyun pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;
2122*4882a593Smuzhiyun
2123*4882a593Smuzhiyun return 0;
2124*4882a593Smuzhiyun }
2125*4882a593Smuzhiyun
kvm_vm_ioctl_irq_line(struct kvm * kvm,struct kvm_irq_level * irq_event,bool line_status)2126*4882a593Smuzhiyun int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
2127*4882a593Smuzhiyun bool line_status)
2128*4882a593Smuzhiyun {
2129*4882a593Smuzhiyun if (!irqchip_in_kernel(kvm))
2130*4882a593Smuzhiyun return -ENXIO;
2131*4882a593Smuzhiyun
2132*4882a593Smuzhiyun irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
2133*4882a593Smuzhiyun irq_event->irq, irq_event->level,
2134*4882a593Smuzhiyun line_status);
2135*4882a593Smuzhiyun return 0;
2136*4882a593Smuzhiyun }
2137*4882a593Smuzhiyun
2138*4882a593Smuzhiyun
kvm_vm_ioctl_enable_cap(struct kvm * kvm,struct kvm_enable_cap * cap)2139*4882a593Smuzhiyun int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
2140*4882a593Smuzhiyun struct kvm_enable_cap *cap)
2141*4882a593Smuzhiyun {
2142*4882a593Smuzhiyun int r;
2143*4882a593Smuzhiyun
2144*4882a593Smuzhiyun if (cap->flags)
2145*4882a593Smuzhiyun return -EINVAL;
2146*4882a593Smuzhiyun
2147*4882a593Smuzhiyun switch (cap->cap) {
2148*4882a593Smuzhiyun #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
2149*4882a593Smuzhiyun case KVM_CAP_PPC_ENABLE_HCALL: {
2150*4882a593Smuzhiyun unsigned long hcall = cap->args[0];
2151*4882a593Smuzhiyun
2152*4882a593Smuzhiyun r = -EINVAL;
2153*4882a593Smuzhiyun if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
2154*4882a593Smuzhiyun cap->args[1] > 1)
2155*4882a593Smuzhiyun break;
2156*4882a593Smuzhiyun if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
2157*4882a593Smuzhiyun break;
2158*4882a593Smuzhiyun if (cap->args[1])
2159*4882a593Smuzhiyun set_bit(hcall / 4, kvm->arch.enabled_hcalls);
2160*4882a593Smuzhiyun else
2161*4882a593Smuzhiyun clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
2162*4882a593Smuzhiyun r = 0;
2163*4882a593Smuzhiyun break;
2164*4882a593Smuzhiyun }
2165*4882a593Smuzhiyun case KVM_CAP_PPC_SMT: {
2166*4882a593Smuzhiyun unsigned long mode = cap->args[0];
2167*4882a593Smuzhiyun unsigned long flags = cap->args[1];
2168*4882a593Smuzhiyun
2169*4882a593Smuzhiyun r = -EINVAL;
2170*4882a593Smuzhiyun if (kvm->arch.kvm_ops->set_smt_mode)
2171*4882a593Smuzhiyun r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
2172*4882a593Smuzhiyun break;
2173*4882a593Smuzhiyun }
2174*4882a593Smuzhiyun
2175*4882a593Smuzhiyun case KVM_CAP_PPC_NESTED_HV:
2176*4882a593Smuzhiyun r = -EINVAL;
2177*4882a593Smuzhiyun if (!is_kvmppc_hv_enabled(kvm) ||
2178*4882a593Smuzhiyun !kvm->arch.kvm_ops->enable_nested)
2179*4882a593Smuzhiyun break;
2180*4882a593Smuzhiyun r = kvm->arch.kvm_ops->enable_nested(kvm);
2181*4882a593Smuzhiyun break;
2182*4882a593Smuzhiyun #endif
2183*4882a593Smuzhiyun #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
2184*4882a593Smuzhiyun case KVM_CAP_PPC_SECURE_GUEST:
2185*4882a593Smuzhiyun r = -EINVAL;
2186*4882a593Smuzhiyun if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
2187*4882a593Smuzhiyun break;
2188*4882a593Smuzhiyun r = kvm->arch.kvm_ops->enable_svm(kvm);
2189*4882a593Smuzhiyun break;
2190*4882a593Smuzhiyun #endif
2191*4882a593Smuzhiyun default:
2192*4882a593Smuzhiyun r = -EINVAL;
2193*4882a593Smuzhiyun break;
2194*4882a593Smuzhiyun }
2195*4882a593Smuzhiyun
2196*4882a593Smuzhiyun return r;
2197*4882a593Smuzhiyun }
2198*4882a593Smuzhiyun
2199*4882a593Smuzhiyun #ifdef CONFIG_PPC_BOOK3S_64
2200*4882a593Smuzhiyun /*
2201*4882a593Smuzhiyun * These functions check whether the underlying hardware is safe
2202*4882a593Smuzhiyun * against attacks based on observing the effects of speculatively
2203*4882a593Smuzhiyun * executed instructions, and whether it supplies instructions for
2204*4882a593Smuzhiyun * use in workarounds. The information comes from firmware, either
2205*4882a593Smuzhiyun * via the device tree on powernv platforms or from an hcall on
2206*4882a593Smuzhiyun * pseries platforms.
2207*4882a593Smuzhiyun */
2208*4882a593Smuzhiyun #ifdef CONFIG_PPC_PSERIES
pseries_get_cpu_char(struct kvm_ppc_cpu_char * cp)2209*4882a593Smuzhiyun static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2210*4882a593Smuzhiyun {
2211*4882a593Smuzhiyun struct h_cpu_char_result c;
2212*4882a593Smuzhiyun unsigned long rc;
2213*4882a593Smuzhiyun
2214*4882a593Smuzhiyun if (!machine_is(pseries))
2215*4882a593Smuzhiyun return -ENOTTY;
2216*4882a593Smuzhiyun
2217*4882a593Smuzhiyun rc = plpar_get_cpu_characteristics(&c);
2218*4882a593Smuzhiyun if (rc == H_SUCCESS) {
2219*4882a593Smuzhiyun cp->character = c.character;
2220*4882a593Smuzhiyun cp->behaviour = c.behaviour;
2221*4882a593Smuzhiyun cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2222*4882a593Smuzhiyun KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2223*4882a593Smuzhiyun KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2224*4882a593Smuzhiyun KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2225*4882a593Smuzhiyun KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2226*4882a593Smuzhiyun KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
2227*4882a593Smuzhiyun KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
2228*4882a593Smuzhiyun KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
2229*4882a593Smuzhiyun KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2230*4882a593Smuzhiyun cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2231*4882a593Smuzhiyun KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2232*4882a593Smuzhiyun KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
2233*4882a593Smuzhiyun KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2234*4882a593Smuzhiyun }
2235*4882a593Smuzhiyun return 0;
2236*4882a593Smuzhiyun }
2237*4882a593Smuzhiyun #else
pseries_get_cpu_char(struct kvm_ppc_cpu_char * cp)2238*4882a593Smuzhiyun static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2239*4882a593Smuzhiyun {
2240*4882a593Smuzhiyun return -ENOTTY;
2241*4882a593Smuzhiyun }
2242*4882a593Smuzhiyun #endif
2243*4882a593Smuzhiyun
have_fw_feat(struct device_node * fw_features,const char * state,const char * name)2244*4882a593Smuzhiyun static inline bool have_fw_feat(struct device_node *fw_features,
2245*4882a593Smuzhiyun const char *state, const char *name)
2246*4882a593Smuzhiyun {
2247*4882a593Smuzhiyun struct device_node *np;
2248*4882a593Smuzhiyun bool r = false;
2249*4882a593Smuzhiyun
2250*4882a593Smuzhiyun np = of_get_child_by_name(fw_features, name);
2251*4882a593Smuzhiyun if (np) {
2252*4882a593Smuzhiyun r = of_property_read_bool(np, state);
2253*4882a593Smuzhiyun of_node_put(np);
2254*4882a593Smuzhiyun }
2255*4882a593Smuzhiyun return r;
2256*4882a593Smuzhiyun }
2257*4882a593Smuzhiyun
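/*
 * On pseries the characteristics come from the H_GET_CPU_CHARACTERISTICS
 * hcall above; otherwise (powernv) walk the ibm,opal/fw-features device-tree
 * node and translate each firmware feature into the corresponding
 * KVM_PPC_CPU_CHAR_* / KVM_PPC_CPU_BEHAV_* bit. The *_mask fields tell
 * userspace which bits in character/behaviour are actually meaningful.
 */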
kvmppc_get_cpu_char(struct kvm_ppc_cpu_char * cp)2258*4882a593Smuzhiyun static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2259*4882a593Smuzhiyun {
2260*4882a593Smuzhiyun struct device_node *np, *fw_features;
2261*4882a593Smuzhiyun int r;
2262*4882a593Smuzhiyun
2263*4882a593Smuzhiyun memset(cp, 0, sizeof(*cp));
2264*4882a593Smuzhiyun r = pseries_get_cpu_char(cp);
2265*4882a593Smuzhiyun if (r != -ENOTTY)
2266*4882a593Smuzhiyun return r;
2267*4882a593Smuzhiyun
2268*4882a593Smuzhiyun np = of_find_node_by_name(NULL, "ibm,opal");
2269*4882a593Smuzhiyun if (np) {
2270*4882a593Smuzhiyun fw_features = of_get_child_by_name(np, "fw-features");
2271*4882a593Smuzhiyun of_node_put(np);
2272*4882a593Smuzhiyun if (!fw_features)
2273*4882a593Smuzhiyun return 0;
2274*4882a593Smuzhiyun if (have_fw_feat(fw_features, "enabled",
2275*4882a593Smuzhiyun "inst-spec-barrier-ori31,31,0"))
2276*4882a593Smuzhiyun cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
2277*4882a593Smuzhiyun if (have_fw_feat(fw_features, "enabled",
2278*4882a593Smuzhiyun "fw-bcctrl-serialized"))
2279*4882a593Smuzhiyun cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
2280*4882a593Smuzhiyun if (have_fw_feat(fw_features, "enabled",
2281*4882a593Smuzhiyun "inst-l1d-flush-ori30,30,0"))
2282*4882a593Smuzhiyun cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
2283*4882a593Smuzhiyun if (have_fw_feat(fw_features, "enabled",
2284*4882a593Smuzhiyun "inst-l1d-flush-trig2"))
2285*4882a593Smuzhiyun cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
2286*4882a593Smuzhiyun if (have_fw_feat(fw_features, "enabled",
2287*4882a593Smuzhiyun "fw-l1d-thread-split"))
2288*4882a593Smuzhiyun cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
2289*4882a593Smuzhiyun if (have_fw_feat(fw_features, "enabled",
2290*4882a593Smuzhiyun "fw-count-cache-disabled"))
2291*4882a593Smuzhiyun cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
2292*4882a593Smuzhiyun if (have_fw_feat(fw_features, "enabled",
2293*4882a593Smuzhiyun "fw-count-cache-flush-bcctr2,0,0"))
2294*4882a593Smuzhiyun cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2295*4882a593Smuzhiyun cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2296*4882a593Smuzhiyun KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2297*4882a593Smuzhiyun KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2298*4882a593Smuzhiyun KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2299*4882a593Smuzhiyun KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2300*4882a593Smuzhiyun KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
2301*4882a593Smuzhiyun KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2302*4882a593Smuzhiyun
2303*4882a593Smuzhiyun if (have_fw_feat(fw_features, "enabled",
2304*4882a593Smuzhiyun "speculation-policy-favor-security"))
2305*4882a593Smuzhiyun cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
2306*4882a593Smuzhiyun if (!have_fw_feat(fw_features, "disabled",
2307*4882a593Smuzhiyun "needs-l1d-flush-msr-pr-0-to-1"))
2308*4882a593Smuzhiyun cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
2309*4882a593Smuzhiyun if (!have_fw_feat(fw_features, "disabled",
2310*4882a593Smuzhiyun "needs-spec-barrier-for-bound-checks"))
2311*4882a593Smuzhiyun cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
2312*4882a593Smuzhiyun if (have_fw_feat(fw_features, "enabled",
2313*4882a593Smuzhiyun "needs-count-cache-flush-on-context-switch"))
2314*4882a593Smuzhiyun cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2315*4882a593Smuzhiyun cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2316*4882a593Smuzhiyun KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2317*4882a593Smuzhiyun KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
2318*4882a593Smuzhiyun KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2319*4882a593Smuzhiyun
2320*4882a593Smuzhiyun of_node_put(fw_features);
2321*4882a593Smuzhiyun }
2322*4882a593Smuzhiyun
2323*4882a593Smuzhiyun return 0;
2324*4882a593Smuzhiyun }
2325*4882a593Smuzhiyun #endif
2326*4882a593Smuzhiyun
kvm_arch_vm_ioctl(struct file * filp,unsigned int ioctl,unsigned long arg)2327*4882a593Smuzhiyun long kvm_arch_vm_ioctl(struct file *filp,
2328*4882a593Smuzhiyun unsigned int ioctl, unsigned long arg)
2329*4882a593Smuzhiyun {
2330*4882a593Smuzhiyun struct kvm *kvm __maybe_unused = filp->private_data;
2331*4882a593Smuzhiyun void __user *argp = (void __user *)arg;
2332*4882a593Smuzhiyun long r;
2333*4882a593Smuzhiyun
2334*4882a593Smuzhiyun switch (ioctl) {
2335*4882a593Smuzhiyun case KVM_PPC_GET_PVINFO: {
2336*4882a593Smuzhiyun struct kvm_ppc_pvinfo pvinfo;
2337*4882a593Smuzhiyun memset(&pvinfo, 0, sizeof(pvinfo));
2338*4882a593Smuzhiyun r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
2339*4882a593Smuzhiyun if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
2340*4882a593Smuzhiyun r = -EFAULT;
2341*4882a593Smuzhiyun goto out;
2342*4882a593Smuzhiyun }
2343*4882a593Smuzhiyun
2344*4882a593Smuzhiyun break;
2345*4882a593Smuzhiyun }
2346*4882a593Smuzhiyun #ifdef CONFIG_SPAPR_TCE_IOMMU
2347*4882a593Smuzhiyun case KVM_CREATE_SPAPR_TCE_64: {
2348*4882a593Smuzhiyun struct kvm_create_spapr_tce_64 create_tce_64;
2349*4882a593Smuzhiyun
2350*4882a593Smuzhiyun r = -EFAULT;
2351*4882a593Smuzhiyun if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
2352*4882a593Smuzhiyun goto out;
2353*4882a593Smuzhiyun if (create_tce_64.flags) {
2354*4882a593Smuzhiyun r = -EINVAL;
2355*4882a593Smuzhiyun goto out;
2356*4882a593Smuzhiyun }
2357*4882a593Smuzhiyun r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2358*4882a593Smuzhiyun goto out;
2359*4882a593Smuzhiyun }
2360*4882a593Smuzhiyun case KVM_CREATE_SPAPR_TCE: {
2361*4882a593Smuzhiyun struct kvm_create_spapr_tce create_tce;
2362*4882a593Smuzhiyun struct kvm_create_spapr_tce_64 create_tce_64;
2363*4882a593Smuzhiyun
2364*4882a593Smuzhiyun r = -EFAULT;
2365*4882a593Smuzhiyun if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
2366*4882a593Smuzhiyun goto out;
2367*4882a593Smuzhiyun
2368*4882a593Smuzhiyun create_tce_64.liobn = create_tce.liobn;
2369*4882a593Smuzhiyun create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
2370*4882a593Smuzhiyun create_tce_64.offset = 0;
2371*4882a593Smuzhiyun create_tce_64.size = create_tce.window_size >>
2372*4882a593Smuzhiyun IOMMU_PAGE_SHIFT_4K;
2373*4882a593Smuzhiyun create_tce_64.flags = 0;
2374*4882a593Smuzhiyun r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2375*4882a593Smuzhiyun goto out;
2376*4882a593Smuzhiyun }
2377*4882a593Smuzhiyun #endif
2378*4882a593Smuzhiyun #ifdef CONFIG_PPC_BOOK3S_64
2379*4882a593Smuzhiyun case KVM_PPC_GET_SMMU_INFO: {
2380*4882a593Smuzhiyun struct kvm_ppc_smmu_info info;
2381*4882a593Smuzhiyun struct kvm *kvm = filp->private_data;
2382*4882a593Smuzhiyun
2383*4882a593Smuzhiyun memset(&info, 0, sizeof(info));
2384*4882a593Smuzhiyun r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
2385*4882a593Smuzhiyun if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2386*4882a593Smuzhiyun r = -EFAULT;
2387*4882a593Smuzhiyun break;
2388*4882a593Smuzhiyun }
2389*4882a593Smuzhiyun case KVM_PPC_RTAS_DEFINE_TOKEN: {
2390*4882a593Smuzhiyun struct kvm *kvm = filp->private_data;
2391*4882a593Smuzhiyun
2392*4882a593Smuzhiyun r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
2393*4882a593Smuzhiyun break;
2394*4882a593Smuzhiyun }
2395*4882a593Smuzhiyun case KVM_PPC_CONFIGURE_V3_MMU: {
2396*4882a593Smuzhiyun struct kvm *kvm = filp->private_data;
2397*4882a593Smuzhiyun struct kvm_ppc_mmuv3_cfg cfg;
2398*4882a593Smuzhiyun
2399*4882a593Smuzhiyun r = -EINVAL;
2400*4882a593Smuzhiyun if (!kvm->arch.kvm_ops->configure_mmu)
2401*4882a593Smuzhiyun goto out;
2402*4882a593Smuzhiyun r = -EFAULT;
2403*4882a593Smuzhiyun if (copy_from_user(&cfg, argp, sizeof(cfg)))
2404*4882a593Smuzhiyun goto out;
2405*4882a593Smuzhiyun r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
2406*4882a593Smuzhiyun break;
2407*4882a593Smuzhiyun }
2408*4882a593Smuzhiyun case KVM_PPC_GET_RMMU_INFO: {
2409*4882a593Smuzhiyun struct kvm *kvm = filp->private_data;
2410*4882a593Smuzhiyun struct kvm_ppc_rmmu_info info;
2411*4882a593Smuzhiyun
2412*4882a593Smuzhiyun r = -EINVAL;
2413*4882a593Smuzhiyun if (!kvm->arch.kvm_ops->get_rmmu_info)
2414*4882a593Smuzhiyun goto out;
2415*4882a593Smuzhiyun r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
2416*4882a593Smuzhiyun if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2417*4882a593Smuzhiyun r = -EFAULT;
2418*4882a593Smuzhiyun break;
2419*4882a593Smuzhiyun }
2420*4882a593Smuzhiyun case KVM_PPC_GET_CPU_CHAR: {
2421*4882a593Smuzhiyun struct kvm_ppc_cpu_char cpuchar;
2422*4882a593Smuzhiyun
2423*4882a593Smuzhiyun r = kvmppc_get_cpu_char(&cpuchar);
2424*4882a593Smuzhiyun if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
2425*4882a593Smuzhiyun r = -EFAULT;
2426*4882a593Smuzhiyun break;
2427*4882a593Smuzhiyun }
2428*4882a593Smuzhiyun case KVM_PPC_SVM_OFF: {
2429*4882a593Smuzhiyun struct kvm *kvm = filp->private_data;
2430*4882a593Smuzhiyun
2431*4882a593Smuzhiyun r = 0;
2432*4882a593Smuzhiyun if (!kvm->arch.kvm_ops->svm_off)
2433*4882a593Smuzhiyun goto out;
2434*4882a593Smuzhiyun
2435*4882a593Smuzhiyun r = kvm->arch.kvm_ops->svm_off(kvm);
2436*4882a593Smuzhiyun break;
2437*4882a593Smuzhiyun }
2438*4882a593Smuzhiyun default: {
2439*4882a593Smuzhiyun struct kvm *kvm = filp->private_data;
2440*4882a593Smuzhiyun r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
2441*4882a593Smuzhiyun }
2442*4882a593Smuzhiyun #else /* CONFIG_PPC_BOOK3S_64 */
2443*4882a593Smuzhiyun default:
2444*4882a593Smuzhiyun r = -ENOTTY;
2445*4882a593Smuzhiyun #endif
2446*4882a593Smuzhiyun }
2447*4882a593Smuzhiyun out:
2448*4882a593Smuzhiyun return r;
2449*4882a593Smuzhiyun }
2450*4882a593Smuzhiyun
2451*4882a593Smuzhiyun static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
2452*4882a593Smuzhiyun static unsigned long nr_lpids;
2453*4882a593Smuzhiyun
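/*
 * LPIDs (logical partition IDs) are handed out from a small bitmap;
 * find_first_zero_bit() followed by test_and_set_bit() makes the allocation
 * safe against concurrent callers without taking a lock.
 */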
kvmppc_alloc_lpid(void)2454*4882a593Smuzhiyun long kvmppc_alloc_lpid(void)
2455*4882a593Smuzhiyun {
2456*4882a593Smuzhiyun long lpid;
2457*4882a593Smuzhiyun
2458*4882a593Smuzhiyun do {
2459*4882a593Smuzhiyun lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
2460*4882a593Smuzhiyun if (lpid >= nr_lpids) {
2461*4882a593Smuzhiyun pr_err("%s: No LPIDs free\n", __func__);
2462*4882a593Smuzhiyun return -ENOMEM;
2463*4882a593Smuzhiyun }
2464*4882a593Smuzhiyun } while (test_and_set_bit(lpid, lpid_inuse));
2465*4882a593Smuzhiyun
2466*4882a593Smuzhiyun return lpid;
2467*4882a593Smuzhiyun }
2468*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
2469*4882a593Smuzhiyun
kvmppc_claim_lpid(long lpid)2470*4882a593Smuzhiyun void kvmppc_claim_lpid(long lpid)
2471*4882a593Smuzhiyun {
2472*4882a593Smuzhiyun set_bit(lpid, lpid_inuse);
2473*4882a593Smuzhiyun }
2474*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);
2475*4882a593Smuzhiyun
kvmppc_free_lpid(long lpid)2476*4882a593Smuzhiyun void kvmppc_free_lpid(long lpid)
2477*4882a593Smuzhiyun {
2478*4882a593Smuzhiyun clear_bit(lpid, lpid_inuse);
2479*4882a593Smuzhiyun }
2480*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
2481*4882a593Smuzhiyun
kvmppc_init_lpid(unsigned long nr_lpids_param)2482*4882a593Smuzhiyun void kvmppc_init_lpid(unsigned long nr_lpids_param)
2483*4882a593Smuzhiyun {
2484*4882a593Smuzhiyun nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
2485*4882a593Smuzhiyun memset(lpid_inuse, 0, sizeof(lpid_inuse));
2486*4882a593Smuzhiyun }
2487*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
2488*4882a593Smuzhiyun
kvm_arch_init(void * opaque)2489*4882a593Smuzhiyun int kvm_arch_init(void *opaque)
2490*4882a593Smuzhiyun {
2491*4882a593Smuzhiyun return 0;
2492*4882a593Smuzhiyun }
2493*4882a593Smuzhiyun
2494*4882a593Smuzhiyun EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);
2495