// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"

#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
		kvmppc_core_queue_vec_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_ALTIVEC */

/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
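/*
 * Decode the faulting load/store with analyse_instr() and forward the
 * access to the generic KVM MMIO handlers (kvmppc_handle_load/store and
 * their FP/VMX/VSX variants). If the instruction cannot be decoded or
 * emulated, a program interrupt is queued and the PC is left unchanged.
 */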
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	u32 inst;
	enum emulation_result emulated = EMULATE_FAIL;
	int advance = 1;
	struct instruction_op op;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

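	/* Reset per-access MMIO emulation state left over from a previous access. */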
	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;
	vcpu->arch.mmio_vmx_copy_nums = 0;
	vcpu->arch.mmio_vmx_offset = 0;
	vcpu->arch.mmio_host_swabbed = 0;

	emulated = EMULATE_FAIL;
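	/*
	 * Give analyse_instr() the guest's current MSR so decoding can take
	 * the guest's mode (e.g. byte order) into account.
	 */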
	vcpu->arch.regs.msr = vcpu->arch.shared->msr;
	if (analyse_instr(&op, &vcpu->arch.regs, ppc_inst(inst)) == 0) {
		int type = op.type & INSTR_TYPE_MASK;
		int size = GETSIZE(op.type);

		switch (type) {
		case LOAD: {
			int instr_byte_swap = op.type & BYTEREV;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(vcpu,
						op.reg, size, !instr_byte_swap);
			else
				emulated = kvmppc_handle_load(vcpu,
						op.reg, size, !instr_byte_swap);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
		}
#ifdef CONFIG_PPC_FPU
		case LOAD_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

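			/*
			 * Tag the target register as an FPR so the MMIO
			 * completion path writes the result back to the FP
			 * register file rather than a GPR.
			 */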
			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(vcpu,
						KVM_MMIO_REG_FPR|op.reg, size, 1);
			else
				emulated = kvmppc_handle_load(vcpu,
						KVM_MMIO_REG_FPR|op.reg, size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case LOAD_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (size == 16) { /* lvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* lvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* lvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* lvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

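			/*
			 * Offset of the accessed element within the 16-byte
			 * VMX register, taken from the low bits of the guest
			 * effective address.
			 */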
			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

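			/* A full 16-byte lvx is emulated as two 8-byte MMIO loads. */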
			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_load(vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_load(vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						size, 1);
			}
			break;
#endif
#ifdef CONFIG_VSX
		case LOAD_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			} else if (op.element_size == 4) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			} else
				break;

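			/*
			 * io_size_each is the size of one MMIO transaction;
			 * mmio_vsx_copy_nums is how many such transactions
			 * make up the whole emulated access.
			 */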
			if (size < op.element_size) {
				/* precision conversion case: lxsspx, etc. */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* lxvw4x, lxvd2x, etc. */
				vcpu->arch.mmio_vsx_copy_nums =
					size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_load(vcpu,
					KVM_MMIO_REG_VSX|op.reg, io_size_each,
					1, op.type & SIGNEXT);
			break;
		}
#endif
		case STORE:
			/*
			 * If a byte reversal is needed, op.val has already
			 * been byte-swapped by analyse_instr().
			 */
			emulated = kvmppc_handle_store(vcpu, op.val, size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#ifdef CONFIG_PPC_FPU
		case STORE_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			/*
			 * The FP registers need to be flushed so that
			 * kvmppc_handle_store() can read the actual FP
			 * values from vcpu->arch.
			 */
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_FP);

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			emulated = kvmppc_handle_store(vcpu,
					VCPU_FPR(vcpu, op.reg), size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case STORE_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses. */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VEC);
			if (size == 16) { /* stvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* stvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* stvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* stvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_store(vcpu,
						op.reg, 8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_store(vcpu,
						op.reg, size, 1);
			}

			break;
#endif
#ifdef CONFIG_VSX
		case STORE_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VSX);

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			else if (op.element_size == 4)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			else
				break;

			if (size < op.element_size) {
				/* precision conversion case, like stxsspx */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* stxvw4x, stxvd2x, etc. */
				vcpu->arch.mmio_vsx_copy_nums =
					size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_store(vcpu,
					op.reg, io_size_each, 1);
			break;
		}
#endif
		case CACHEOP:
			/*
			 * Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled
			 * dcache coherence.
			 */
			emulated = EMULATE_DONE;
			break;
		default:
			break;
		}
	}

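	/*
	 * Nothing above handled the access: leave the PC where it is and
	 * queue a program interrupt for the guest.
	 */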
	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}