/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Support for hardware virtualization extensions
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Yann Le Du <ledu@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cmpxchg.h>
#include <asm/fpu.h>
#include <asm/hazards.h>
#include <asm/inst.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/tlbex.h>

#include <linux/kvm_host.h>

#include "interrupt.h"
#ifdef CONFIG_CPU_LOONGSON64
#include "loongson_regs.h"
#endif

#include "trace.h"

/* Pointers to last VCPU loaded on each physical CPU */
static struct kvm_vcpu *last_vcpu[NR_CPUS];
/* Pointers to last VCPU executed on each physical CPU */
static struct kvm_vcpu *last_exec_vcpu[NR_CPUS];

/*
 * Number of guest VTLB entries to use, so we can catch inconsistency between
 * CPUs.
 */
static unsigned int kvm_vz_guest_vtlb_size;

static inline long kvm_vz_read_gc0_ebase(void)
{
	if (sizeof(long) == 8 && cpu_has_ebase_wg)
		return read_gc0_ebase_64();
	else
		return read_gc0_ebase();
}

static inline void kvm_vz_write_gc0_ebase(long v)
{
	/*
	 * First write with WG=1 to write upper bits, then write again in case
	 * WG should be left at 0.
	 * write_gc0_ebase_64() is no longer UNDEFINED since R6.
	 */
	if (sizeof(long) == 8 &&
	    (cpu_has_mips64r6 || cpu_has_ebase_wg)) {
		write_gc0_ebase_64(v | MIPS_EBASE_WG);
		write_gc0_ebase_64(v);
	} else {
		write_gc0_ebase(v | MIPS_EBASE_WG);
		write_gc0_ebase(v);
	}
}

/*
 * These Config bits may be writable by the guest:
 * Config:	[K23, KU] (!TLB), K0
 * Config1:	(none)
 * Config2:	[TU, SU] (impl)
 * Config3:	ISAOnExc
 * Config4:	FTLBPageSize
 * Config5:	K, CV, MSAEn, UFE, FRE, SBRI, UFR
 */
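
/*
 * Illustrative sketch only (not upstream code): a wrmask is intended to gate
 * a Config write as a read-modify-write so that only maskable bits may
 * change, along the lines of this hypothetical caller:
 *
 *	change = (read_gc0_config5() ^ val) &
 *		 kvm_vz_config5_guest_wrmask(vcpu);
 *	write_gc0_config5(read_gc0_config5() ^ change);
 */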

static inline unsigned int kvm_vz_config_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return CONF_CM_CMASK;
}

static inline unsigned int kvm_vz_config1_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline unsigned int kvm_vz_config2_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline unsigned int kvm_vz_config3_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return MIPS_CONF3_ISA_OE;
}

static inline unsigned int kvm_vz_config4_guest_wrmask(struct kvm_vcpu *vcpu)
{
	/* no need to be exact */
	return MIPS_CONF4_VFTLBPAGESIZE;
}

static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = MIPS_CONF5_K | MIPS_CONF5_CV | MIPS_CONF5_SBRI;

	/* Permit MSAEn changes if MSA supported and enabled */
	if (kvm_mips_guest_has_msa(&vcpu->arch))
		mask |= MIPS_CONF5_MSAEN;

	/*
	 * Permit guest FPU mode changes if FPU is enabled and the relevant
	 * feature exists according to FIR register.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		if (cpu_has_ufr)
			mask |= MIPS_CONF5_UFR;
		if (cpu_has_fre)
			mask |= MIPS_CONF5_FRE | MIPS_CONF5_UFE;
	}

	return mask;
}

static inline unsigned int kvm_vz_config6_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return LOONGSON_CONF6_INTIMER | LOONGSON_CONF6_EXTIMER;
}

/*
 * VZ optionally allows these additional Config bits to be written by root:
 * Config:	M, [MT]
 * Config1:	M, [MMUSize-1, C2, MD, PC, WR, CA], FP
 * Config2:	M
 * Config3:	M, MSAP, [BPG], ULRI, [DSP2P, DSPP], CTXTC, [ITL, LPA, VEIC,
 *		VInt, SP, CDMM, MT, SM, TL]
 * Config4:	M, [VTLBSizeExt, MMUSizeExt]
 * Config5:	MRP
 */

static inline unsigned int kvm_vz_config_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config1_user_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = kvm_vz_config1_guest_wrmask(vcpu) | MIPS_CONF_M;

	/* Permit FPU to be present if FPU is supported */
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
		mask |= MIPS_CONF1_FP;

	return mask;
}

static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config2_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M |
		MIPS_CONF3_ULRI | MIPS_CONF3_CTXTC;

	/* Permit MSA to be present if MSA is supported */
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		mask |= MIPS_CONF3_MSA;

	return mask;
}

static inline unsigned int kvm_vz_config4_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config4_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP;
}

static inline unsigned int kvm_vz_config6_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config6_guest_wrmask(vcpu) |
		LOONGSON_CONF6_SFBEN | LOONGSON_CONF6_FTLBDIS;
}

static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
{
	/* VZ guest has already converted gva to gpa */
	return gva;
}

static void kvm_vz_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
}

static void kvm_vz_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	clear_bit(priority, &vcpu->arch.pending_exceptions);
	set_bit(priority, &vcpu->arch.pending_exceptions_clr);
}

static void kvm_vz_queue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/*
	 * timer expiry is asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/*
	 * timer expiry is asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
				   struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	/*
	 * interrupts are asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_queue_irq(vcpu, kvm_irq_to_priority(intr));
}

static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
				     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	/*
	 * interrupts are asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_dequeue_irq(vcpu, kvm_irq_to_priority(-intr));
}

static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
				 u32 cause)
{
	u32 irq = (priority < MIPS_EXC_MAX) ?
		kvm_priority_to_irq[priority] : 0;

	switch (priority) {
	case MIPS_EXC_INT_TIMER:
		set_gc0_cause(C_TI);
		break;

	case MIPS_EXC_INT_IO_1:
	case MIPS_EXC_INT_IO_2:
	case MIPS_EXC_INT_IPI_1:
	case MIPS_EXC_INT_IPI_2:
		if (cpu_has_guestctl2)
			set_c0_guestctl2(irq);
		else
			set_gc0_cause(irq);
		break;

	default:
		break;
	}

	clear_bit(priority, &vcpu->arch.pending_exceptions);
	return 1;
}

static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
			       u32 cause)
{
	u32 irq = (priority < MIPS_EXC_MAX) ?
		kvm_priority_to_irq[priority] : 0;

	switch (priority) {
	case MIPS_EXC_INT_TIMER:
		/*
		 * Call to kvm_write_c0_guest_compare() clears Cause.TI in
		 * kvm_mips_emulate_CP0(). Explicitly clear irq associated with
		 * Cause.IP[IPTI] if GuestCtl2 virtual interrupt register not
		 * supported or if not using GuestCtl2 Hardware Clear.
		 */
		if (cpu_has_guestctl2) {
			if (!(read_c0_guestctl2() & (irq << 14)))
				clear_c0_guestctl2(irq);
		} else {
			clear_gc0_cause(irq);
		}
		break;

	case MIPS_EXC_INT_IO_1:
	case MIPS_EXC_INT_IO_2:
	case MIPS_EXC_INT_IPI_1:
	case MIPS_EXC_INT_IPI_2:
		/* Clear GuestCtl2.VIP irq if not using Hardware Clear */
		if (cpu_has_guestctl2) {
			if (!(read_c0_guestctl2() & (irq << 14)))
				clear_c0_guestctl2(irq);
		} else {
			clear_gc0_cause(irq);
		}
		break;

	default:
		break;
	}

	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
	return 1;
}

/*
 * VZ guest timer handling.
 */

/**
 * kvm_vz_should_use_htimer() - Find whether to use the VZ hard guest timer.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	true if the VZ GTOffset & real guest CP0_Count should be used
 *		instead of software emulation of guest timer.
 *		false otherwise.
 */
static bool kvm_vz_should_use_htimer(struct kvm_vcpu *vcpu)
{
	if (kvm_mips_count_disabled(vcpu))
		return false;

	/* Chosen frequency must match real frequency */
	if (mips_hpt_frequency != vcpu->arch.count_hz)
		return false;

	/* We don't support a CP0_GTOffset with fewer bits than CP0_Count */
	if (current_cpu_data.gtoffset_mask != 0xffffffff)
		return false;

	return true;
}

/**
 * _kvm_vz_restore_stimer() - Restore soft timer state.
 * @vcpu:	Virtual CPU.
 * @compare:	CP0_Compare register value, restored by caller.
 * @cause:	CP0_Cause register to restore.
 *
 * Restore VZ state relating to the soft timer. The hard timer can be enabled
 * later.
 */
static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare,
				   u32 cause)
{
	/*
	 * Avoid spurious counter interrupts by setting Guest CP0_Count to just
	 * after Guest CP0_Compare.
	 */
	write_c0_gtoffset(compare - read_c0_count());

	back_to_back_c0_hazard();
	write_gc0_cause(cause);
}

/**
 * _kvm_vz_restore_htimer() - Restore hard timer state.
 * @vcpu:	Virtual CPU.
 * @compare:	CP0_Compare register value, restored by caller.
 * @cause:	CP0_Cause register to restore.
 *
 * Restore hard timer Guest.Count & Guest.Cause taking care to preserve the
 * value of Guest.CP0_Cause.TI while restoring Guest.CP0_Cause.
 */
static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
				   u32 compare, u32 cause)
{
	u32 start_count, after_count;
	ktime_t freeze_time;
	unsigned long flags;

	/*
	 * Freeze the soft-timer and sync the guest CP0_Count with it. We do
	 * this with interrupts disabled to avoid latency.
	 */
	local_irq_save(flags);
	freeze_time = kvm_mips_freeze_hrtimer(vcpu, &start_count);
	write_c0_gtoffset(start_count - read_c0_count());
	local_irq_restore(flags);

	/* restore guest CP0_Cause, as TI may already be set */
	back_to_back_c0_hazard();
	write_gc0_cause(cause);

	/*
	 * The above sequence isn't atomic and would result in lost timer
	 * interrupts if we're not careful. Detect if a timer interrupt is due
	 * and assert it.
	 */
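	/*
	 * The unsigned comparison below is a modulo-2^32 interval test: it
	 * holds exactly when CP0_Compare lies in (start_count, after_count].
	 * Worked example (illustrative): with start_count = 0xfffffff0,
	 * compare = 0x10 and after_count = 0x20, we get 0x30 > 0x1f, so a
	 * timer interrupt that fired across the 32-bit wrap is detected and
	 * queued rather than lost.
	 */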
	back_to_back_c0_hazard();
	after_count = read_gc0_count();
	if (after_count - start_count > compare - start_count - 1)
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

/**
 * kvm_vz_restore_timer() - Restore timer state.
 * @vcpu:	Virtual CPU.
 *
 * Restore soft timer state from saved context.
 */
static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 cause, compare;

	compare = kvm_read_sw_gc0_compare(cop0);
	cause = kvm_read_sw_gc0_cause(cop0);

	write_gc0_compare(compare);
	_kvm_vz_restore_stimer(vcpu, compare, cause);
}

/**
 * kvm_vz_acquire_htimer() - Switch to hard timer state.
 * @vcpu:	Virtual CPU.
 *
 * Restore hard timer state on top of existing soft timer state if possible.
 *
 * Since hard timer won't remain active over preemption, preemption should be
 * disabled by the caller.
 */
void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu)
{
	u32 gctl0;

	gctl0 = read_c0_guestctl0();
	if (!(gctl0 & MIPS_GCTL0_GT) && kvm_vz_should_use_htimer(vcpu)) {
		/* enable guest access to hard timer */
		write_c0_guestctl0(gctl0 | MIPS_GCTL0_GT);

		_kvm_vz_restore_htimer(vcpu, read_gc0_compare(),
				       read_gc0_cause());
	}
}

/**
 * _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
 * @vcpu:	Virtual CPU.
 * @out_compare: Pointer to write compare value to.
 * @out_cause:	Pointer to write cause value to.
 *
 * Save VZ guest timer state and switch to software emulation of guest CP0
 * timer. The hard timer must already be in use, so preemption should be
 * disabled.
 */
static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu,
				u32 *out_compare, u32 *out_cause)
{
	u32 cause, compare, before_count, end_count;
	ktime_t before_time;

	compare = read_gc0_compare();
	*out_compare = compare;

	before_time = ktime_get();

	/*
	 * Record the CP0_Count *prior* to saving CP0_Cause, so we have a time
	 * at which no pending timer interrupt is missing.
	 */
	before_count = read_gc0_count();
	back_to_back_c0_hazard();
	cause = read_gc0_cause();
	*out_cause = cause;

	/*
	 * Record a final CP0_Count which we will transfer to the soft-timer.
	 * This is recorded *after* saving CP0_Cause, so we don't get any timer
	 * interrupts from just after the final CP0_Count point.
	 */
	back_to_back_c0_hazard();
	end_count = read_gc0_count();

	/*
	 * The above sequence isn't atomic, so we could miss a timer interrupt
	 * between reading CP0_Cause and end_count. Detect and record any timer
	 * interrupt due between before_count and end_count.
	 */
	if (end_count - before_count > compare - before_count - 1)
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);

	/*
	 * Restore soft-timer, ignoring a small amount of negative drift due to
	 * delay between freeze_hrtimer and setting CP0_GTOffset.
	 */
	kvm_mips_restore_hrtimer(vcpu, before_time, end_count, -0x10000);
}

/**
 * kvm_vz_save_timer() - Save guest timer state.
 * @vcpu:	Virtual CPU.
 *
 * Save VZ guest timer state and switch to soft guest timer if hard timer was
 * in use.
 */
static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 gctl0, compare, cause;

	gctl0 = read_c0_guestctl0();
	if (gctl0 & MIPS_GCTL0_GT) {
		/* disable guest use of hard timer */
		write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);

		/* save hard timer state */
		_kvm_vz_save_htimer(vcpu, &compare, &cause);
	} else {
		compare = read_gc0_compare();
		cause = read_gc0_cause();
	}

	/* save timer-related state to VCPU context */
	kvm_write_sw_gc0_cause(cop0, cause);
	kvm_write_sw_gc0_compare(cop0, compare);
}

/**
 * kvm_vz_lose_htimer() - Ensure hard guest timer is not in use.
 * @vcpu:	Virtual CPU.
 *
 * Transfers the state of the hard guest timer to the soft guest timer, leaving
 * guest state intact so it can continue to be used with the soft timer.
 */
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu)
{
	u32 gctl0, compare, cause;

	preempt_disable();
	gctl0 = read_c0_guestctl0();
	if (gctl0 & MIPS_GCTL0_GT) {
		/* disable guest use of timer */
		write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);

		/* switch to soft timer */
		_kvm_vz_save_htimer(vcpu, &compare, &cause);

		/* leave soft timer in usable state */
		_kvm_vz_restore_stimer(vcpu, compare, cause);
	}
	preempt_enable();
}

/**
 * is_eva_access() - Find whether an instruction is an EVA memory accessor.
 * @inst:	32-bit instruction encoding.
 *
 * Finds whether @inst encodes an EVA memory access instruction, which would
 * indicate that emulation of it should access the user mode address space
 * instead of the kernel mode address space. This matters for MUSUK segments
 * which are TLB mapped for user mode but unmapped for kernel mode.
 *
 * Returns:	Whether @inst encodes an EVA accessor instruction.
 */
static bool is_eva_access(union mips_instruction inst)
{
	if (inst.spec3_format.opcode != spec3_op)
		return false;

	switch (inst.spec3_format.func) {
	case lwle_op:
	case lwre_op:
	case cachee_op:
	case sbe_op:
	case she_op:
	case sce_op:
	case swe_op:
	case swle_op:
	case swre_op:
	case prefe_op:
	case lbue_op:
	case lhue_op:
	case lbe_op:
	case lhe_op:
	case lle_op:
	case lwe_op:
		return true;
	default:
		return false;
	}
}

/**
 * is_eva_am_mapped() - Find whether an access mode is mapped.
 * @vcpu:	KVM VCPU state.
 * @am:		3-bit encoded access mode.
 * @eu:		Segment becomes unmapped and uncached when Status.ERL=1.
 *
 * Decode @am to find whether it encodes a mapped segment for the current VCPU
 * state. Where necessary @eu and the actual instruction causing the fault are
 * taken into account to make the decision.
 *
 * Returns:	Whether the VCPU faulted on a TLB mapped address.
 */
static bool is_eva_am_mapped(struct kvm_vcpu *vcpu, unsigned int am, bool eu)
{
	u32 am_lookup;
	int err;

	/*
	 * Interpret access control mode. We assume address errors will already
	 * have been caught by the guest, leaving us with:
	 *      AM      UM  SM  KM  31..24 23..16
	 * UK    0 000          Unm   0      0
	 * MK    1 001          TLB   1
	 * MSK   2 010      TLB TLB   1
	 * MUSK  3 011  TLB TLB TLB   1
	 * MUSUK 4 100  TLB TLB Unm   0      1
	 * USK   5 101      Unm Unm   0      0
	 * -     6 110                0      0
	 * UUSK  7 111  Unm Unm Unm   0      0
	 *
	 * We shift a magic value by AM across the sign bit to find if always
	 * TLB mapped, and if not shift by 8 again to find if it depends on KM.
	 */
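	/*
	 * Worked example (illustrative): AM=1 (MK) gives
	 * 0x70080000 << 1 = 0xe0100000, negative, hence always TLB mapped;
	 * AM=4 (MUSUK) gives 0x00800000, positive, and a further << 8 gives
	 * 0x80000000, negative, hence mapped depending on kernel mode;
	 * AM=5 (USK) stays non-negative through both shifts, hence unmapped.
	 */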
	am_lookup = 0x70080000 << am;
	if ((s32)am_lookup < 0) {
		/*
		 * MK, MSK, MUSK
		 * Always TLB mapped, unless SegCtl.EU && ERL
		 */
		if (!eu || !(read_gc0_status() & ST0_ERL))
			return true;
	} else {
		am_lookup <<= 8;
		if ((s32)am_lookup < 0) {
			union mips_instruction inst;
			unsigned int status;
			u32 *opc;

			/*
			 * MUSUK
			 * TLB mapped if not in kernel mode
			 */
			status = read_gc0_status();
			if (!(status & (ST0_EXL | ST0_ERL)) &&
			    (status & ST0_KSU))
				return true;
			/*
			 * EVA access instructions in kernel
			 * mode access user address space.
			 */
			opc = (u32 *)vcpu->arch.pc;
			if (vcpu->arch.host_cp0_cause & CAUSEF_BD)
				opc += 1;
			err = kvm_get_badinstr(opc, vcpu, &inst.word);
			if (!err && is_eva_access(inst))
				return true;
		}
	}

	return false;
}

/**
 * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
 * @vcpu:	KVM VCPU state.
 * @gva:	Guest virtual address to convert.
 * @gpa:	Output guest physical address.
 *
 * Convert a guest virtual address (GVA) which is valid according to the guest
 * context, to a guest physical address (GPA).
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
			     unsigned long *gpa)
{
	u32 gva32 = gva;
	unsigned long segctl;

	if ((long)gva == (s32)gva32) {
		/* Handle canonical 32-bit virtual address */
		if (cpu_guest_has_segments) {
			unsigned long mask, pa;

			switch (gva32 >> 29) {
			case 0:
			case 1: /* CFG5 (1GB) */
				segctl = read_gc0_segctl2() >> 16;
				mask = (unsigned long)0xfc0000000ull;
				break;
			case 2:
			case 3: /* CFG4 (1GB) */
				segctl = read_gc0_segctl2();
				mask = (unsigned long)0xfc0000000ull;
				break;
			case 4: /* CFG3 (512MB) */
				segctl = read_gc0_segctl1() >> 16;
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 5: /* CFG2 (512MB) */
				segctl = read_gc0_segctl1();
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 6: /* CFG1 (512MB) */
				segctl = read_gc0_segctl0() >> 16;
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 7: /* CFG0 (512MB) */
				segctl = read_gc0_segctl0();
				mask = (unsigned long)0xfe0000000ull;
				break;
			default:
				/*
				 * GCC 4.9 isn't smart enough to figure out that
				 * segctl and mask are always initialised.
				 */
				unreachable();
			}

			if (is_eva_am_mapped(vcpu, (segctl >> 4) & 0x7,
					     segctl & 0x0008))
				goto tlb_mapped;

			/* Unmapped, find guest physical address */
			pa = (segctl << 20) & mask;
			pa |= gva32 & ~mask;
			*gpa = pa;
			return 0;
		} else if ((s32)gva32 < (s32)0xc0000000) {
			/* legacy unmapped KSeg0 or KSeg1 */
			*gpa = gva32 & 0x1fffffff;
			return 0;
		}
#ifdef CONFIG_64BIT
	} else if ((gva & 0xc000000000000000) == 0x8000000000000000) {
		/* XKPHYS */
		if (cpu_guest_has_segments) {
			/*
			 * Each of the 8 regions can be overridden by SegCtl2.XR
			 * to use SegCtl1.XAM.
			 */
			segctl = read_gc0_segctl2();
			if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) {
				segctl = read_gc0_segctl1();
				if (is_eva_am_mapped(vcpu, (segctl >> 59) & 0x7,
						     0))
					goto tlb_mapped;
			}

		}
		/*
		 * Traditionally fully unmapped.
		 * Bits 61:59 specify the CCA, which we can just mask off here.
		 * Bits 58:PABITS should be zero, but we shouldn't have got here
		 * if it wasn't.
		 */
		*gpa = gva & 0x07ffffffffffffff;
		return 0;
#endif
	}

tlb_mapped:
	return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
}

/**
 * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA.
 * @vcpu:	KVM VCPU state.
 * @badvaddr:	Root BadVAddr.
 * @gpa:	Output guest physical address.
 *
 * VZ implementations are permitted to report guest virtual addresses (GVA) in
 * BadVAddr on a root exception during guest execution, instead of the more
 * convenient guest physical addresses (GPA). When we get a GVA, this function
 * converts it to a GPA, taking into account guest segmentation and guest TLB
 * state.
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
static int kvm_vz_badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr,
				  unsigned long *gpa)
{
	unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 &
				 MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;

	/* If BadVAddr is GPA, then all is well in the world */
	if (likely(gexccode == MIPS_GCTL0_GEXC_GPA)) {
		*gpa = badvaddr;
		return 0;
	}

	/* Otherwise we'd expect it to be GVA ... */
	if (WARN(gexccode != MIPS_GCTL0_GEXC_GVA,
		 "Unexpected gexccode %#x\n", gexccode))
		return -EINVAL;

	/* ... and we need to perform the GVA->GPA translation in software */
	return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);
}

static int kvm_trap_vz_no_handler(struct kvm_vcpu *vcpu)
{
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 inst = 0;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	kvm_get_badinstr(opc, vcpu, &inst);

	kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
		exccode, opc, inst, badvaddr,
		read_gc0_status());
	kvm_arch_vcpu_dump_regs(vcpu);
	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	return RESUME_HOST;
}

static unsigned long mips_process_maar(unsigned int op, unsigned long val)
{
	/* Mask off unused bits */
	unsigned long mask = 0xfffff000 | MIPS_MAAR_S | MIPS_MAAR_VL;

	if (read_gc0_pagegrain() & PG_ELPA)
		mask |= 0x00ffffff00000000ull;
	if (cpu_guest_has_mvh)
		mask |= MIPS_MAAR_VH;

	/* Set or clear VH */
	if (op == mtc_op) {
		/* clear VH */
		val &= ~MIPS_MAAR_VH;
	} else if (op == dmtc_op) {
		/* set VH to match VL */
		val &= ~MIPS_MAAR_VH;
		if (val & MIPS_MAAR_VL)
			val |= MIPS_MAAR_VH;
	}

	return val & mask;
}
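
/*
 * Writing an all-ones index to MAARI selects the last implemented MAAR pair
 * (the architectural convention that lets guest software discover how many
 * MAARs exist); other out-of-range index values are ignored.
 */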
static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	val &= MIPS_MAARI_INDEX;
	if (val == MIPS_MAARI_INDEX)
		kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1);
	else if (val < ARRAY_SIZE(vcpu->arch.maar))
		kvm_write_sw_gc0_maari(cop0, val);
}

static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
					      u32 *opc, u32 cause,
					      struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	u32 rt, rd, sel;
	unsigned long curr_pc;
	unsigned long val;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	if (inst.co_format.co) {
		switch (inst.co_format.func) {
		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		default:
			er = EMULATE_FAIL;
		}
	} else {
		rt = inst.c0r_format.rt;
		rd = inst.c0r_format.rd;
		sel = inst.c0r_format.sel;

		switch (inst.c0r_format.rs) {
		case dmfc_op:
		case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			if (rd == MIPS_CP0_COUNT &&
			    sel == 0) {			/* Count */
				val = kvm_mips_read_count(vcpu);
			} else if (rd == MIPS_CP0_COMPARE &&
				   sel == 0) {		/* Compare */
				val = read_gc0_compare();
			} else if (rd == MIPS_CP0_LLADDR &&
				   sel == 0) {		/* LLAddr */
				if (cpu_guest_has_rw_llb)
					val = read_gc0_lladdr() &
						MIPS_LLADDR_LLB;
				else
					val = 0;
			} else if (rd == MIPS_CP0_LLADDR &&
				   sel == 1 &&		/* MAAR */
				   cpu_guest_has_maar &&
				   !cpu_guest_has_dyn_maar) {
				/* MAARI must be in range */
				BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
				       ARRAY_SIZE(vcpu->arch.maar));
				val = vcpu->arch.maar[
					kvm_read_sw_gc0_maari(cop0)];
			} else if ((rd == MIPS_CP0_PRID &&
				    (sel == 0 ||	/* PRid */
				     sel == 2 ||	/* CDMMBase */
				     sel == 3)) ||	/* CMGCRBase */
				   (rd == MIPS_CP0_STATUS &&
				    (sel == 2 ||	/* SRSCtl */
				     sel == 3)) ||	/* SRSMap */
				   (rd == MIPS_CP0_CONFIG &&
				    (sel == 6 ||	/* Config6 */
				     sel == 7)) ||	/* Config7 */
				   (rd == MIPS_CP0_LLADDR &&
				    (sel == 2) &&	/* MAARI */
				    cpu_guest_has_maar &&
				    !cpu_guest_has_dyn_maar) ||
				   (rd == MIPS_CP0_ERRCTL &&
				    (sel == 0))) {	/* ErrCtl */
				val = cop0->reg[rd][sel];
#ifdef CONFIG_CPU_LOONGSON64
			} else if (rd == MIPS_CP0_DIAG &&
				   (sel == 0)) {	/* Diag */
				val = cop0->reg[rd][sel];
#endif
			} else {
				val = 0;
				er = EMULATE_FAIL;
			}

			if (er != EMULATE_FAIL) {
				/* Sign extend */
				if (inst.c0r_format.rs == mfc_op)
					val = (int)val;
				vcpu->arch.gprs[rt] = val;
			}

			trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mfc_op) ?
					KVM_TRACE_MFC0 : KVM_TRACE_DMFC0,
				      KVM_TRACE_COP0(rd, sel), val);
			break;

		case dmtc_op:
		case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			val = vcpu->arch.gprs[rt];
			trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mtc_op) ?
					KVM_TRACE_MTC0 : KVM_TRACE_DMTC0,
				      KVM_TRACE_COP0(rd, sel), val);

			if (rd == MIPS_CP0_COUNT &&
			    sel == 0) {			/* Count */
				kvm_vz_lose_htimer(vcpu);
				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
			} else if (rd == MIPS_CP0_COMPARE &&
				   sel == 0) {		/* Compare */
				kvm_mips_write_compare(vcpu,
						       vcpu->arch.gprs[rt],
						       true);
			} else if (rd == MIPS_CP0_LLADDR &&
				   sel == 0) {		/* LLAddr */
				/*
				 * P5600 generates GPSI on guest MTC0 LLAddr.
				 * Only allow the guest to clear LLB.
				 */
				if (cpu_guest_has_rw_llb &&
				    !(val & MIPS_LLADDR_LLB))
					write_gc0_lladdr(0);
			} else if (rd == MIPS_CP0_LLADDR &&
				   sel == 1 &&		/* MAAR */
				   cpu_guest_has_maar &&
				   !cpu_guest_has_dyn_maar) {
				val = mips_process_maar(inst.c0r_format.rs,
							val);

				/* MAARI must be in range */
				BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
				       ARRAY_SIZE(vcpu->arch.maar));
				vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] =
									val;
			} else if (rd == MIPS_CP0_LLADDR &&
				   (sel == 2) &&	/* MAARI */
				   cpu_guest_has_maar &&
				   !cpu_guest_has_dyn_maar) {
				kvm_write_maari(vcpu, val);
			} else if (rd == MIPS_CP0_CONFIG &&
				   (sel == 6)) {
				cop0->reg[rd][sel] = (int)val;
			} else if (rd == MIPS_CP0_ERRCTL &&
				   (sel == 0)) {	/* ErrCtl */
				/* ignore the written value */
#ifdef CONFIG_CPU_LOONGSON64
			} else if (rd == MIPS_CP0_DIAG &&
				   (sel == 0)) {	/* Diag */
				unsigned long flags;

				local_irq_save(flags);
				if (val & LOONGSON_DIAG_BTB) {
					/* Flush BTB */
					set_c0_diag(LOONGSON_DIAG_BTB);
				}
				if (val & LOONGSON_DIAG_ITLB) {
					/* Flush ITLB */
					set_c0_diag(LOONGSON_DIAG_ITLB);
				}
				if (val & LOONGSON_DIAG_DTLB) {
					/* Flush DTLB */
					set_c0_diag(LOONGSON_DIAG_DTLB);
				}
				if (val & LOONGSON_DIAG_VTLB) {
					/* Flush VTLB */
					kvm_loongson_clear_guest_vtlb();
				}
				if (val & LOONGSON_DIAG_FTLB) {
					/* Flush FTLB */
					kvm_loongson_clear_guest_ftlb();
				}
				local_irq_restore(flags);
#endif
			} else {
				er = EMULATE_FAIL;
			}
			break;

		default:
			er = EMULATE_FAIL;
			break;
		}
	}
	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n",
			curr_pc, __func__, inst.word);

		vcpu->arch.pc = curr_pc;
	}

	return er;
}

static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
					       u32 *opc, u32 cause,
					       struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	u32 cache, op_inst, op, base;
	s16 offset;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long va, curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	base = inst.i_format.rs;
	op_inst = inst.i_format.rt;
	if (cpu_has_mips_r6)
		offset = inst.spec3_format.simmediate;
	else
		offset = inst.i_format.simmediate;
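	/*
	 * In the CACHE op field (held in the rt position), the low two bits
	 * select the cache (I/D/T/S) and the upper three bits select the
	 * operation; the two masks below split op_inst accordingly.
	 */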
1101*4882a593Smuzhiyun cache = op_inst & CacheOp_Cache;
1102*4882a593Smuzhiyun op = op_inst & CacheOp_Op;
1103*4882a593Smuzhiyun
1104*4882a593Smuzhiyun va = arch->gprs[base] + offset;
1105*4882a593Smuzhiyun
1106*4882a593Smuzhiyun kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1107*4882a593Smuzhiyun cache, op, base, arch->gprs[base], offset);
1108*4882a593Smuzhiyun
1109*4882a593Smuzhiyun /* Secondary or tirtiary cache ops ignored */
1110*4882a593Smuzhiyun if (cache != Cache_I && cache != Cache_D)
1111*4882a593Smuzhiyun return EMULATE_DONE;
1112*4882a593Smuzhiyun
1113*4882a593Smuzhiyun switch (op_inst) {
1114*4882a593Smuzhiyun case Index_Invalidate_I:
1115*4882a593Smuzhiyun flush_icache_line_indexed(va);
1116*4882a593Smuzhiyun return EMULATE_DONE;
1117*4882a593Smuzhiyun case Index_Writeback_Inv_D:
1118*4882a593Smuzhiyun flush_dcache_line_indexed(va);
1119*4882a593Smuzhiyun return EMULATE_DONE;
1120*4882a593Smuzhiyun case Hit_Invalidate_I:
1121*4882a593Smuzhiyun case Hit_Invalidate_D:
1122*4882a593Smuzhiyun case Hit_Writeback_Inv_D:
1123*4882a593Smuzhiyun if (boot_cpu_type() == CPU_CAVIUM_OCTEON3) {
1124*4882a593Smuzhiyun /* We can just flush entire icache */
1125*4882a593Smuzhiyun local_flush_icache_range(0, 0);
1126*4882a593Smuzhiyun return EMULATE_DONE;
1127*4882a593Smuzhiyun }
1128*4882a593Smuzhiyun
1129*4882a593Smuzhiyun /* So far, other platforms support guest hit cache ops */
1130*4882a593Smuzhiyun break;
1131*4882a593Smuzhiyun default:
1132*4882a593Smuzhiyun break;
1133*4882a593Smuzhiyun }
1134*4882a593Smuzhiyun
1135*4882a593Smuzhiyun kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1136*4882a593Smuzhiyun curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
1137*4882a593Smuzhiyun offset);
1138*4882a593Smuzhiyun /* Rollback PC */
1139*4882a593Smuzhiyun vcpu->arch.pc = curr_pc;
1140*4882a593Smuzhiyun
1141*4882a593Smuzhiyun return EMULATE_FAIL;
1142*4882a593Smuzhiyun }
1143*4882a593Smuzhiyun
1144*4882a593Smuzhiyun #ifdef CONFIG_CPU_LOONGSON64
kvm_vz_gpsi_lwc2(union mips_instruction inst,u32 * opc,u32 cause,struct kvm_vcpu * vcpu)1145*4882a593Smuzhiyun static enum emulation_result kvm_vz_gpsi_lwc2(union mips_instruction inst,
1146*4882a593Smuzhiyun u32 *opc, u32 cause,
1147*4882a593Smuzhiyun struct kvm_vcpu *vcpu)
1148*4882a593Smuzhiyun {
1149*4882a593Smuzhiyun unsigned int rs, rd;
1150*4882a593Smuzhiyun unsigned int hostcfg;
1151*4882a593Smuzhiyun unsigned long curr_pc;
1152*4882a593Smuzhiyun enum emulation_result er = EMULATE_DONE;
1153*4882a593Smuzhiyun
1154*4882a593Smuzhiyun /*
1155*4882a593Smuzhiyun * Update PC and hold onto current PC in case there is
1156*4882a593Smuzhiyun * an error and we want to rollback the PC
1157*4882a593Smuzhiyun */
1158*4882a593Smuzhiyun curr_pc = vcpu->arch.pc;
1159*4882a593Smuzhiyun er = update_pc(vcpu, cause);
1160*4882a593Smuzhiyun if (er == EMULATE_FAIL)
1161*4882a593Smuzhiyun return er;
1162*4882a593Smuzhiyun
1163*4882a593Smuzhiyun rs = inst.loongson3_lscsr_format.rs;
1164*4882a593Smuzhiyun rd = inst.loongson3_lscsr_format.rd;
1165*4882a593Smuzhiyun switch (inst.loongson3_lscsr_format.fr) {
1166*4882a593Smuzhiyun case 0x8: /* Read CPUCFG */
1167*4882a593Smuzhiyun ++vcpu->stat.vz_cpucfg_exits;
1168*4882a593Smuzhiyun hostcfg = read_cpucfg(vcpu->arch.gprs[rs]);
1169*4882a593Smuzhiyun
1170*4882a593Smuzhiyun switch (vcpu->arch.gprs[rs]) {
1171*4882a593Smuzhiyun case LOONGSON_CFG0:
			vcpu->arch.gprs[rd] = 0x14c000;
			break;
		case LOONGSON_CFG1:
			hostcfg &= (LOONGSON_CFG1_FP | LOONGSON_CFG1_MMI |
				    LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 |
				    LOONGSON_CFG1_SFBP);
			vcpu->arch.gprs[rd] = hostcfg;
			break;
		case LOONGSON_CFG2:
			hostcfg &= (LOONGSON_CFG2_LEXT1 | LOONGSON_CFG2_LEXT2 |
				    LOONGSON_CFG2_LEXT3 | LOONGSON_CFG2_LSPW);
			vcpu->arch.gprs[rd] = hostcfg;
			break;
		case LOONGSON_CFG3:
			vcpu->arch.gprs[rd] = hostcfg;
			break;
		default:
			/* Don't export any other advanced features to guest */
			vcpu->arch.gprs[rd] = 0;
			break;
		}
		break;

	default:
		kvm_err("lwc2 emulate not impl %d rs %lx @%lx\n",
			inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs], curr_pc);
		er = EMULATE_FAIL;
		break;
	}

	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported lwc2 instruction 0x%08x 0x%08x\n",
			curr_pc, __func__, inst.word, inst.loongson3_lscsr_format.fr);

		vcpu->arch.pc = curr_pc;
	}

	return er;
}
#endif

static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	union mips_instruction inst;
	int rd, rt, sel;
	int err;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	switch (inst.r_format.opcode) {
	case cop0_op:
		er = kvm_vz_gpsi_cop0(inst, opc, cause, vcpu);
		break;
#ifndef CONFIG_CPU_MIPSR6
	case cache_op:
		trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
		er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
		break;
#endif
#ifdef CONFIG_CPU_LOONGSON64
	case lwc2_op:
		er = kvm_vz_gpsi_lwc2(inst, opc, cause, vcpu);
		break;
#endif
	case spec3_op:
		switch (inst.spec3_format.func) {
#ifdef CONFIG_CPU_MIPSR6
		case cache6_op:
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
			er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
			break;
#endif
		case rdhwr_op:
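			/*
			 * A valid RDHWR encoding has rs == 0 and only the
			 * low 3 bits of the re field in use (the select);
			 * anything else is treated as unknown below.
			 */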
			if (inst.r_format.rs || (inst.r_format.re >> 3))
				goto unknown;

			rd = inst.r_format.rd;
			rt = inst.r_format.rt;
			sel = inst.r_format.re & 0x7;

			switch (rd) {
			case MIPS_HWR_CC: /* Read count register */
				arch->gprs[rt] =
					(long)(int)kvm_mips_read_count(vcpu);
				break;
			default:
				trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
					      KVM_TRACE_HWR(rd, sel), 0);
				goto unknown;
			}

			trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
				      KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);

			er = update_pc(vcpu, cause);
			break;
		default:
			goto unknown;
		}
		break;
unknown:

	default:
		kvm_err("GPSI exception not supported (%p/%#x)\n",
			opc, inst.word);
		kvm_arch_vcpu_dump_regs(vcpu);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	union mips_instruction inst;
	int err;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	/* complete MTC0 on behalf of guest and advance EPC */
	if (inst.c0r_format.opcode == cop0_op &&
	    inst.c0r_format.rs == mtc_op &&
	    inst.c0r_format.z == 0) {
		int rt = inst.c0r_format.rt;
		int rd = inst.c0r_format.rd;
		int sel = inst.c0r_format.sel;
		unsigned int val = arch->gprs[rt];
		unsigned int old_val, change;

		trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel),
			      val);

		if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
			/* FR bit should read as zero if no FPU */
			if (!kvm_mips_guest_has_fpu(&vcpu->arch))
				val &= ~(ST0_CU1 | ST0_FR);

			/*
			 * Also don't allow FR to be set if host doesn't support
			 * it.
			 */
			if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
				val &= ~ST0_FR;

			old_val = read_gc0_status();
			change = val ^ old_val;

			if (change & ST0_FR) {
				/*
				 * FPU and Vector register state is made
				 * UNPREDICTABLE by a change of FR, so don't
				 * even bother saving it.
				 */
				kvm_drop_fpu(vcpu);
			}

			/*
			 * If MSA state is already live, it is undefined how it
			 * interacts with FR=0 FPU state, and we don't want to
			 * hit reserved instruction exceptions trying to save
			 * the MSA state later when CU=1 && FR=1, so play it
			 * safe and save it first.
			 */
			if (change & ST0_CU1 && !(val & ST0_FR) &&
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
				kvm_lose_fpu(vcpu);

			write_gc0_status(val);
		} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
			u32 old_cause = read_gc0_cause();
			u32 change = old_cause ^ val;

			/* DC bit enabling/disabling timer? */
			if (change & CAUSEF_DC) {
				if (val & CAUSEF_DC) {
					kvm_vz_lose_htimer(vcpu);
					kvm_mips_count_disable_cause(vcpu);
				} else {
					kvm_mips_count_enable_cause(vcpu);
				}
			}

			/* Only certain bits are RW to the guest */
			change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP |
				   CAUSEF_IP0 | CAUSEF_IP1);

			/* WP can only be cleared */
			change &= ~CAUSEF_WP | old_cause;
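			/*
			 * Worked example (hypothetical values): if old_cause
			 * has WP=1 and the guest writes WP=0, WP is set in
			 * change and (~CAUSEF_WP | old_cause) preserves it,
			 * so the XOR below clears WP. If old_cause has WP=0,
			 * the WP bit of change is masked off and a guest
			 * attempt to set WP is ignored.
			 */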

			write_gc0_cause(old_cause ^ change);
		} else if ((rd == MIPS_CP0_STATUS) && (sel == 1)) { /* IntCtl */
			write_gc0_intctl(val);
		} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
			old_val = read_gc0_config5();
			change = val ^ old_val;
			/* Handle changes in FPU/MSA modes */
			preempt_disable();

			/*
			 * Propagate FRE changes immediately if the FPU
			 * context is already loaded.
			 */
			if (change & MIPS_CONF5_FRE &&
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
				change_c0_config5(MIPS_CONF5_FRE, val);

			preempt_enable();

			val = old_val ^
				(change & kvm_vz_config5_guest_wrmask(vcpu));
			write_gc0_config5(val);
		} else {
			kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n",
				opc, inst.word);
			er = EMULATE_FAIL;
		}

		if (er != EMULATE_FAIL)
			er = update_pc(vcpu, cause);
	} else {
		kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n",
			opc, inst.word);
		er = EMULATE_FAIL;
	}

	return er;
}

static enum emulation_result kvm_trap_vz_handle_ghfc(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	/*
	 * Presumably this is due to MC (guest mode change), so let's trace
	 * some relevant info.
	 */
	trace_kvm_guest_mode_change(vcpu);

	return EMULATE_DONE;
}

static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc,
						   struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	union mips_instruction inst;
	unsigned long curr_pc;
	int err;

	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	er = kvm_mips_emul_hypcall(vcpu, inst);
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

	return er;
}
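/*
 * On success kvm_mips_emul_hypcall() returns EMULATE_HYPERCALL, which the
 * guest exit dispatcher below forwards to kvm_mips_handle_hypcall().
 */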

static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode,
							       u32 cause,
							       u32 *opc,
							       struct kvm_vcpu *vcpu)
{
	u32 inst;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	kvm_get_badinstr(opc, vcpu, &inst);

	kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x Status: %#x\n",
		gexccode, opc, inst, read_gc0_status());

	return EMULATE_FAIL;
}

static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
{
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
			MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
	int ret = RESUME_GUEST;

	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode);
	switch (gexccode) {
	case MIPS_GCTL0_GEXC_GPSI:
		++vcpu->stat.vz_gpsi_exits;
		er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GSFC:
		++vcpu->stat.vz_gsfc_exits;
		er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_HC:
		++vcpu->stat.vz_hc_exits;
		er = kvm_trap_vz_handle_hc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GRR:
		++vcpu->stat.vz_grr_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GVA:
		++vcpu->stat.vz_gva_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GHFC:
		++vcpu->stat.vz_ghfc_exits;
		er = kvm_trap_vz_handle_ghfc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GPA:
		++vcpu->stat.vz_gpa_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	default:
		++vcpu->stat.vz_resvd_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_HYPERCALL) {
		ret = kvm_mips_handle_hypcall(vcpu);
	} else {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

/**
 * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
 * @vcpu: Virtual CPU context.
 *
 * Handle when the guest attempts to use a coprocessor which hasn't been allowed
 * by the root context.
 */
static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_FAIL;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
		/*
		 * If guest FPU not present, the FPU operation should have been
		 * treated as a reserved instruction!
		 * If FPU already in use, we shouldn't get this at all.
		 */
		if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
			preempt_enable();
			return EMULATE_FAIL;
		}

		kvm_own_fpu(vcpu);
		er = EMULATE_DONE;
	}
	/* other coprocessors not handled */

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}

/**
 * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu: Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled in the root
 * context.
 */
static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
	/*
	 * If MSA not present or not exposed to guest or FR=0, the MSA operation
	 * should have been treated as a reserved instruction!
	 * Same if CU1=1, FR=0.
	 * If MSA already in use, we shouldn't get this at all.
	 */
	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
	    (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
	    !(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	kvm_own_msa(vcpu);

	return RESUME_GUEST;
}

static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;
	int err, ret = RESUME_GUEST;

	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) {
		/* A code fetch fault doesn't count as an MMIO */
		if (kvm_is_ifetch_fault(&vcpu->arch)) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Fetch the instruction */
		if (cause & CAUSEF_BD)
			opc += 1;
		err = kvm_get_badinstr(opc, vcpu, &inst.word);
		if (err) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Treat as MMIO */
		er = kvm_mips_emulate_load(inst, cause, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
				opc, badvaddr);
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;
	int err;
	int ret = RESUME_GUEST;

	/* Just try the access again if we couldn't do the translation */
	if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr))
		return RESUME_GUEST;
	vcpu->arch.host_cp0_badvaddr = badvaddr;

	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) {
		/* Fetch the instruction */
		if (cause & CAUSEF_BD)
			opc += 1;
		err = kvm_get_badinstr(opc, vcpu, &inst.word);
		if (err) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Treat as MMIO */
		er = kvm_mips_emulate_store(inst, cause, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
				opc, badvaddr);
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static u64 kvm_vz_get_one_regs[] = {
	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_ENTRYLO0,
	KVM_REG_MIPS_CP0_ENTRYLO1,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_PAGEGRAIN,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_INTCTL,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_EBASE,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
	KVM_REG_MIPS_CP0_CONFIG6,
#ifdef CONFIG_64BIT
	KVM_REG_MIPS_CP0_XCONTEXT,
#endif
	KVM_REG_MIPS_CP0_ERROREPC,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};
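/*
 * Baseline set of one_reg indices always exposed; optional feature
 * registers (UserLocal, BadInstr(P), ContextConfig, SegCtl, HTW, MAAR,
 * KScratch) are appended conditionally by kvm_vz_copy_reg_indices().
 */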

static u64 kvm_vz_get_one_regs_contextconfig[] = {
	KVM_REG_MIPS_CP0_CONTEXTCONFIG,
#ifdef CONFIG_64BIT
	KVM_REG_MIPS_CP0_XCONTEXTCONFIG,
#endif
};

static u64 kvm_vz_get_one_regs_segments[] = {
	KVM_REG_MIPS_CP0_SEGCTL0,
	KVM_REG_MIPS_CP0_SEGCTL1,
	KVM_REG_MIPS_CP0_SEGCTL2,
};

static u64 kvm_vz_get_one_regs_htw[] = {
	KVM_REG_MIPS_CP0_PWBASE,
	KVM_REG_MIPS_CP0_PWFIELD,
	KVM_REG_MIPS_CP0_PWSIZE,
	KVM_REG_MIPS_CP0_PWCTL,
};

static u64 kvm_vz_get_one_regs_kscratch[] = {
	KVM_REG_MIPS_CP0_KSCRATCH1,
	KVM_REG_MIPS_CP0_KSCRATCH2,
	KVM_REG_MIPS_CP0_KSCRATCH3,
	KVM_REG_MIPS_CP0_KSCRATCH4,
	KVM_REG_MIPS_CP0_KSCRATCH5,
	KVM_REG_MIPS_CP0_KSCRATCH6,
};

static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long ret;

	ret = ARRAY_SIZE(kvm_vz_get_one_regs);
	if (cpu_guest_has_userlocal)
		++ret;
	if (cpu_guest_has_badinstr)
		++ret;
	if (cpu_guest_has_badinstrp)
		++ret;
	if (cpu_guest_has_contextconfig)
		ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
	if (cpu_guest_has_segments)
		ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
	if (cpu_guest_has_htw || cpu_guest_has_ldpte)
		ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
	if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar)
		ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
	ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);

	return ret;
}
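/*
 * The count computed by kvm_vz_num_regs() must stay in sync with the
 * indices written out below; userspace sizes its KVM_GET_REG_LIST
 * buffer from that count.
 */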

static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
	u64 index;
	unsigned int i;

	if (copy_to_user(indices, kvm_vz_get_one_regs,
			 sizeof(kvm_vz_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_vz_get_one_regs);

	if (cpu_guest_has_userlocal) {
		index = KVM_REG_MIPS_CP0_USERLOCAL;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	if (cpu_guest_has_badinstr) {
		index = KVM_REG_MIPS_CP0_BADINSTR;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	if (cpu_guest_has_badinstrp) {
		index = KVM_REG_MIPS_CP0_BADINSTRP;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	if (cpu_guest_has_contextconfig) {
		if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig,
				 sizeof(kvm_vz_get_one_regs_contextconfig)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
	}
	if (cpu_guest_has_segments) {
		if (copy_to_user(indices, kvm_vz_get_one_regs_segments,
				 sizeof(kvm_vz_get_one_regs_segments)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
	}
	if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
		if (copy_to_user(indices, kvm_vz_get_one_regs_htw,
				 sizeof(kvm_vz_get_one_regs_htw)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
	}
	if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) {
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) {
			index = KVM_REG_MIPS_CP0_MAAR(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}

		index = KVM_REG_MIPS_CP0_MAARI;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
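	/*
	 * KScratch1..KScratch6 are CP0 Register 31 selects 2..7, hence
	 * the i + 2 offset when testing for their presence below.
	 */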
	for (i = 0; i < 6; ++i) {
		if (!cpu_guest_has_kscr(i + 2))
			continue;

		if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i],
				 sizeof(kvm_vz_get_one_regs_kscratch[i])))
			return -EFAULT;
		++indices;
	}

	return 0;
}

static inline s64 entrylo_kvm_to_user(unsigned long v)
{
	s64 mask, ret = v;

	if (BITS_PER_LONG == 32) {
		/*
		 * KVM API exposes 64-bit version of the register, so move the
		 * RI/XI bits up into place.
		 */
		mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
		ret &= ~mask;
		ret |= ((s64)v & mask) << 32;
	}
	return ret;
}
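/*
 * Illustration: in the 32-bit EntryLo format RI/XI occupy the top two
 * bits (31:30) of the native register, while the 64-bit KVM register
 * view keeps them in bits 63:62, so the masked bits are shifted up by
 * 32 on the way out above and back down by 32 in entrylo_user_to_kvm()
 * below.
 */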

static inline unsigned long entrylo_user_to_kvm(s64 v)
{
	unsigned long mask, ret = v;

	if (BITS_PER_LONG == 32) {
		/*
		 * KVM API exposes 64-bit version of the register, so move the
		 * RI/XI bits down into place.
		 */
		mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
		ret &= ~mask;
		ret |= (v >> 32) & mask;
	}
	return ret;
}

static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      s64 *v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int idx;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		*v = (long)read_gc0_index();
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		*v = entrylo_kvm_to_user(read_gc0_entrylo0());
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		*v = entrylo_kvm_to_user(read_gc0_entrylo1());
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		*v = (long)read_gc0_context();
		break;
	case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		*v = read_gc0_contextconfig();
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		if (!cpu_guest_has_userlocal)
			return -EINVAL;
		*v = read_gc0_userlocal();
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		*v = read_gc0_xcontextconfig();
		break;
#endif
	case KVM_REG_MIPS_CP0_PAGEMASK:
		*v = (long)read_gc0_pagemask();
		break;
	case KVM_REG_MIPS_CP0_PAGEGRAIN:
		*v = (long)read_gc0_pagegrain();
		break;
	case KVM_REG_MIPS_CP0_SEGCTL0:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		*v = read_gc0_segctl0();
		break;
	case KVM_REG_MIPS_CP0_SEGCTL1:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		*v = read_gc0_segctl1();
		break;
	case KVM_REG_MIPS_CP0_SEGCTL2:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		*v = read_gc0_segctl2();
		break;
	case KVM_REG_MIPS_CP0_PWBASE:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		*v = read_gc0_pwbase();
		break;
	case KVM_REG_MIPS_CP0_PWFIELD:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		*v = read_gc0_pwfield();
		break;
	case KVM_REG_MIPS_CP0_PWSIZE:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		*v = read_gc0_pwsize();
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		*v = (long)read_gc0_wired();
		break;
	case KVM_REG_MIPS_CP0_PWCTL:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		*v = read_gc0_pwctl();
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		*v = (long)read_gc0_hwrena();
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		*v = (long)read_gc0_badvaddr();
		break;
	case KVM_REG_MIPS_CP0_BADINSTR:
		if (!cpu_guest_has_badinstr)
			return -EINVAL;
		*v = read_gc0_badinstr();
		break;
	case KVM_REG_MIPS_CP0_BADINSTRP:
		if (!cpu_guest_has_badinstrp)
			return -EINVAL;
		*v = read_gc0_badinstrp();
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		*v = (long)read_gc0_entryhi();
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		*v = (long)read_gc0_compare();
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		*v = (long)read_gc0_status();
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		*v = read_gc0_intctl();
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		*v = (long)read_gc0_cause();
		break;
	case KVM_REG_MIPS_CP0_EPC:
		*v = (long)read_gc0_epc();
		break;
	case KVM_REG_MIPS_CP0_PRID:
		switch (boot_cpu_type()) {
		case CPU_CAVIUM_OCTEON3:
			/* Octeon III has a read-only guest.PRid */
			*v = read_gc0_prid();
			break;
		default:
			*v = (long)kvm_read_c0_guest_prid(cop0);
			break;
		}
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		*v = kvm_vz_read_gc0_ebase();
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		*v = read_gc0_config();
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		if (!cpu_guest_has_conf1)
			return -EINVAL;
		*v = read_gc0_config1();
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		if (!cpu_guest_has_conf2)
			return -EINVAL;
		*v = read_gc0_config2();
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		if (!cpu_guest_has_conf3)
			return -EINVAL;
		*v = read_gc0_config3();
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		if (!cpu_guest_has_conf4)
			return -EINVAL;
		*v = read_gc0_config4();
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		if (!cpu_guest_has_conf5)
			return -EINVAL;
		*v = read_gc0_config5();
		break;
	case KVM_REG_MIPS_CP0_CONFIG6:
		*v = kvm_read_sw_gc0_config6(cop0);
		break;
	case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
		if (idx >= ARRAY_SIZE(vcpu->arch.maar))
			return -EINVAL;
		*v = vcpu->arch.maar[idx];
		break;
	case KVM_REG_MIPS_CP0_MAARI:
		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
			return -EINVAL;
		*v = kvm_read_sw_gc0_maari(vcpu->arch.cop0);
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXT:
		*v = read_gc0_xcontext();
		break;
#endif
	case KVM_REG_MIPS_CP0_ERROREPC:
		*v = (long)read_gc0_errorepc();
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!cpu_guest_has_kscr(idx))
			return -EINVAL;
		switch (idx) {
		case 2:
			*v = (long)read_gc0_kscratch1();
			break;
		case 3:
			*v = (long)read_gc0_kscratch2();
			break;
		case 4:
			*v = (long)read_gc0_kscratch3();
			break;
		case 5:
			*v = (long)read_gc0_kscratch4();
			break;
		case 6:
			*v = (long)read_gc0_kscratch5();
			break;
		case 7:
			*v = (long)read_gc0_kscratch6();
			break;
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int idx;
	int ret = 0;
	unsigned int cur, change;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		write_gc0_index(v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		write_gc0_entrylo0(entrylo_user_to_kvm(v));
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		write_gc0_entrylo1(entrylo_user_to_kvm(v));
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		write_gc0_context(v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		write_gc0_contextconfig(v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		if (!cpu_guest_has_userlocal)
			return -EINVAL;
		write_gc0_userlocal(v);
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		write_gc0_xcontextconfig(v);
		break;
#endif
	case KVM_REG_MIPS_CP0_PAGEMASK:
		write_gc0_pagemask(v);
		break;
	case KVM_REG_MIPS_CP0_PAGEGRAIN:
		write_gc0_pagegrain(v);
		break;
	case KVM_REG_MIPS_CP0_SEGCTL0:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		write_gc0_segctl0(v);
		break;
	case KVM_REG_MIPS_CP0_SEGCTL1:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		write_gc0_segctl1(v);
		break;
	case KVM_REG_MIPS_CP0_SEGCTL2:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		write_gc0_segctl2(v);
		break;
	case KVM_REG_MIPS_CP0_PWBASE:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		write_gc0_pwbase(v);
		break;
	case KVM_REG_MIPS_CP0_PWFIELD:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		write_gc0_pwfield(v);
		break;
	case KVM_REG_MIPS_CP0_PWSIZE:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		write_gc0_pwsize(v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		change_gc0_wired(MIPSR6_WIRED_WIRED, v);
		break;
	case KVM_REG_MIPS_CP0_PWCTL:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		write_gc0_pwctl(v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		write_gc0_hwrena(v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		write_gc0_badvaddr(v);
		break;
	case KVM_REG_MIPS_CP0_BADINSTR:
		if (!cpu_guest_has_badinstr)
			return -EINVAL;
		write_gc0_badinstr(v);
		break;
	case KVM_REG_MIPS_CP0_BADINSTRP:
		if (!cpu_guest_has_badinstrp)
			return -EINVAL;
		write_gc0_badinstrp(v);
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		write_gc0_entryhi(v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v, false);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		write_gc0_status(v);
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		write_gc0_intctl(v);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * Stopping or starting the timer (via the DC bit) must appear
		 * atomic with any change to the timer interrupt pending bit
		 * (TI); a timer interrupt must not be able to fire in between.
		 */
		if ((read_gc0_cause() ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				change_gc0_cause((u32)~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				change_gc0_cause((u32)~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			write_gc0_cause(v);
		}
		break;
	case KVM_REG_MIPS_CP0_EPC:
		write_gc0_epc(v);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		switch (boot_cpu_type()) {
		case CPU_CAVIUM_OCTEON3:
			/* Octeon III has a guest.PRid, but it's read-only */
2271*4882a593Smuzhiyun break;
2272*4882a593Smuzhiyun default:
2273*4882a593Smuzhiyun kvm_write_c0_guest_prid(cop0, v);
2274*4882a593Smuzhiyun break;
2275*4882a593Smuzhiyun }
2276*4882a593Smuzhiyun break;
2277*4882a593Smuzhiyun case KVM_REG_MIPS_CP0_EBASE:
2278*4882a593Smuzhiyun kvm_vz_write_gc0_ebase(v);
2279*4882a593Smuzhiyun break;
2280*4882a593Smuzhiyun case KVM_REG_MIPS_CP0_CONFIG:
2281*4882a593Smuzhiyun cur = read_gc0_config();
2282*4882a593Smuzhiyun change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu);
2283*4882a593Smuzhiyun if (change) {
2284*4882a593Smuzhiyun v = cur ^ change;
2285*4882a593Smuzhiyun write_gc0_config(v);
2286*4882a593Smuzhiyun }
2287*4882a593Smuzhiyun break;
2288*4882a593Smuzhiyun case KVM_REG_MIPS_CP0_CONFIG1:
2289*4882a593Smuzhiyun if (!cpu_guest_has_conf1)
2290*4882a593Smuzhiyun break;
2291*4882a593Smuzhiyun cur = read_gc0_config1();
2292*4882a593Smuzhiyun change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu);
2293*4882a593Smuzhiyun if (change) {
2294*4882a593Smuzhiyun v = cur ^ change;
2295*4882a593Smuzhiyun write_gc0_config1(v);
2296*4882a593Smuzhiyun }
2297*4882a593Smuzhiyun break;
2298*4882a593Smuzhiyun case KVM_REG_MIPS_CP0_CONFIG2:
2299*4882a593Smuzhiyun if (!cpu_guest_has_conf2)
2300*4882a593Smuzhiyun break;
2301*4882a593Smuzhiyun cur = read_gc0_config2();
2302*4882a593Smuzhiyun change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu);
2303*4882a593Smuzhiyun if (change) {
2304*4882a593Smuzhiyun v = cur ^ change;
2305*4882a593Smuzhiyun write_gc0_config2(v);
2306*4882a593Smuzhiyun }
2307*4882a593Smuzhiyun break;
2308*4882a593Smuzhiyun case KVM_REG_MIPS_CP0_CONFIG3:
2309*4882a593Smuzhiyun if (!cpu_guest_has_conf3)
2310*4882a593Smuzhiyun break;
2311*4882a593Smuzhiyun cur = read_gc0_config3();
2312*4882a593Smuzhiyun change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu);
2313*4882a593Smuzhiyun if (change) {
2314*4882a593Smuzhiyun v = cur ^ change;
2315*4882a593Smuzhiyun write_gc0_config3(v);
2316*4882a593Smuzhiyun }
2317*4882a593Smuzhiyun break;
2318*4882a593Smuzhiyun case KVM_REG_MIPS_CP0_CONFIG4:
2319*4882a593Smuzhiyun if (!cpu_guest_has_conf4)
2320*4882a593Smuzhiyun break;
		cur = read_gc0_config4();
		change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config4(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		if (!cpu_guest_has_conf5)
			break;
		cur = read_gc0_config5();
		change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config5(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG6:
		cur = kvm_read_sw_gc0_config6(cop0);
		change = (cur ^ v) & kvm_vz_config6_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_sw_gc0_config6(cop0, (int)v);
		}
		break;
	case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
		if (idx >= ARRAY_SIZE(vcpu->arch.maar))
			return -EINVAL;
		vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v);
		break;
	case KVM_REG_MIPS_CP0_MAARI:
		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
			return -EINVAL;
		kvm_write_maari(vcpu, v);
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXT:
		write_gc0_xcontext(v);
		break;
#endif
	case KVM_REG_MIPS_CP0_ERROREPC:
		write_gc0_errorepc(v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!cpu_guest_has_kscr(idx))
			return -EINVAL;
		switch (idx) {
		case 2:
			write_gc0_kscratch1(v);
			break;
		case 3:
			write_gc0_kscratch2(v);
			break;
		case 4:
			write_gc0_kscratch3(v);
			break;
		case 5:
			write_gc0_kscratch4(v);
			break;
		case 6:
			write_gc0_kscratch5(v);
			break;
		case 7:
			write_gc0_kscratch6(v);
			break;
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}
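
/*
 * Illustrative userspace sketch of driving the register accessors above
 * through the KVM_SET_ONE_REG ioctl. This is an example under stated
 * assumptions, not part of this file or of the kernel build: it presumes
 * a vcpu fd obtained via KVM_CREATE_VCPU and the register IDs exposed by
 * the MIPS KVM UAPI headers. KVM_REG_MIPS_COUNT_HZ reaches
 * kvm_mips_set_count_hz() via the switch above.
 */
#if 0	/* example only, never compiled */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_guest_count_hz(int vcpu_fd, uint64_t hz)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_MIPS_COUNT_HZ,
		.addr = (uintptr_t)&hz,
	};

	/* 0 on success; fails with EINVAL if the switch above rejects it */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}
#endif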

#define guestid_cache(cpu)	(cpu_data[cpu].guestid_cache)
static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu)
{
	unsigned long guestid = guestid_cache(cpu);

	if (!(++guestid & GUESTID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		if (!guestid)	/* fix version if needed */
			guestid = GUESTID_FIRST_VERSION;

		++guestid;	/* guestid 0 reserved for root */

		/* start new guestid cycle */
		kvm_vz_local_flush_roottlb_all_guests();
		kvm_vz_local_flush_guesttlb_all();
	}

	guestid_cache(cpu) = guestid;
}
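
/*
 * Worked example of the allocator above, assuming (purely for
 * illustration) an 8-bit GuestID field, i.e. GUESTID_MASK == 0xff and
 * GUESTID_FIRST_VERSION == 0x100:
 *
 *   cache 0x1fe: increment gives 0x1ff, low bits non-zero, hand out 0x1ff;
 *   cache 0x1ff: increment gives 0x200, low bits hit zero, so a new cycle
 *   starts: 0x200 is bumped to 0x201 (GuestID 0 stays reserved for root)
 *   and both root and guest TLBs are flushed so recycled IDs can never
 *   match stale entries. Only on a full counter wraparound to 0 is the
 *   version re-seeded with GUESTID_FIRST_VERSION.
 */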

/* Returns 1 if the guest TLB may be clobbered */
static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
{
	int ret = 0;
	int i;

	if (!kvm_request_pending(vcpu))
		return 0;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		if (cpu_has_guestid) {
			/* Drop all GuestIDs for this VCPU */
			for_each_possible_cpu(i)
				vcpu->arch.vzguestid[i] = 0;
			/* This will clobber guest TLB contents too */
			ret = 1;
		}
		/*
		 * For Root ASID Dealias (RAD) we don't do anything here, but we
		 * still need the request to ensure we recheck asid_flush_mask.
		 * We can still return 0 as only the root TLB will be affected
		 * by a root ASID flush.
		 */
	}

	return ret;
}
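
/*
 * Note on the GuestID case above: a vzguestid[] entry of 0 can never
 * match guestid_cache(cpu), since GuestID 0 is reserved for root and the
 * allocator always skips it, so the version check in
 * kvm_vz_vcpu_load_tlb() below is guaranteed to see a mismatch and
 * allocate a fresh GuestID on the next guest entry.
 */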

static void kvm_vz_vcpu_save_wired(struct kvm_vcpu *vcpu)
{
	unsigned int wired = read_gc0_wired();
	struct kvm_mips_tlb *tlbs;
	int i;

	/* Expand the wired TLB array if necessary */
	wired &= MIPSR6_WIRED_WIRED;
	if (wired > vcpu->arch.wired_tlb_limit) {
		tlbs = krealloc(vcpu->arch.wired_tlb, wired *
				sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC);
		if (WARN_ON(!tlbs)) {
			/* Save whatever we can */
			wired = vcpu->arch.wired_tlb_limit;
		} else {
			vcpu->arch.wired_tlb = tlbs;
			vcpu->arch.wired_tlb_limit = wired;
		}
	}

	if (wired)
		/* Save wired entries from the guest TLB */
		kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired);
	/* Invalidate any dropped entries since last time */
	for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) {
		vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
		vcpu->arch.wired_tlb[i].tlb_lo[0] = 0;
		vcpu->arch.wired_tlb[i].tlb_lo[1] = 0;
		vcpu->arch.wired_tlb[i].tlb_mask = 0;
	}
	vcpu->arch.wired_tlb_used = wired;
}
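
/*
 * Example of the bookkeeping above: if the guest has dropped Wired from
 * 4 to 2 since the last save, array slots 2 and 3 still hold the
 * previously saved mappings, so they are overwritten with unique,
 * non-matching EntryHi values and zeroed lo/mask words before
 * wired_tlb_used is trimmed. A stale slot can then never be mistaken for,
 * or reloaded as, a live wired mapping.
 */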

static void kvm_vz_vcpu_load_wired(struct kvm_vcpu *vcpu)
{
	/* Load wired entries into the guest TLB */
	if (vcpu->arch.wired_tlb)
		kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0,
				     vcpu->arch.wired_tlb_used);
}

static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct mm_struct *gpa_mm = &kvm->arch.gpa_mm;
	bool migrated;

	/*
	 * Are we entering guest context on a different CPU to last time?
	 * If so, the VCPU's guest TLB state on this CPU may be stale.
	 */
	migrated = (vcpu->arch.last_exec_cpu != cpu);
	vcpu->arch.last_exec_cpu = cpu;

	/*
	 * A vcpu's GuestID is set in GuestCtl1.ID when the vcpu is loaded and
	 * remains set until another vcpu is loaded in. As a rule GuestRID
	 * remains zeroed when in root context unless the kernel is busy
	 * manipulating guest tlb entries.
	 */
	if (cpu_has_guestid) {
		/*
		 * Check if our GuestID is of an older version and thus invalid.
		 *
		 * We also discard the stored GuestID if we've executed on
		 * another CPU, as the guest mappings may have changed without
		 * hypervisor knowledge.
		 */
		if (migrated ||
		    (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
					GUESTID_VERSION_MASK) {
			kvm_vz_get_new_guestid(cpu, vcpu);
			vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
			trace_kvm_guestid_change(vcpu,
						 vcpu->arch.vzguestid[cpu]);
		}

		/* Restore GuestID */
		change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
	} else {
		/*
		 * The Guest TLB only stores a single guest's TLB state, so
		 * flush it if another VCPU has executed on this CPU.
		 *
		 * We also flush if we've executed on another CPU, as the guest
		 * mappings may have changed without hypervisor knowledge.
		 */
		if (migrated || last_exec_vcpu[cpu] != vcpu)
			kvm_vz_local_flush_guesttlb_all();
		last_exec_vcpu[cpu] = vcpu;

		/*
		 * Root ASID dealiases guest GPA mappings in the root TLB.
		 * Allocate new root ASID if needed.
		 */
		if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask))
			get_new_mmu_context(gpa_mm);
		else
			check_mmu_context(gpa_mm);
	}
}
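
/*
 * Worked example of the version check above, again assuming an 8-bit
 * GuestID field (GUESTID_MASK == 0xff, GUESTID_VERSION_MASK == ~0xff):
 * a stored vzguestid of 0x1a3 against a guestid_cache of 0x2c5 gives
 * (0x1a3 ^ 0x2c5) & ~0xff == 0x300, which is non-zero, so the stored ID
 * dates from an older flush cycle and a fresh one is allocated. A stored
 * 0x2a3 against the same cache gives (0x2a3 ^ 0x2c5) & ~0xff == 0, so the
 * ID is from the current cycle and is reused as-is.
 */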

static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	bool migrated, all;

	/*
	 * Have we migrated to a different CPU?
	 * If so, any old guest TLB state may be stale.
	 */
	migrated = (vcpu->arch.last_sched_cpu != cpu);

	/*
	 * Was this the last VCPU to run on this CPU?
	 * If not, any old guest state from this VCPU will have been clobbered.
	 */
	all = migrated || (last_vcpu[cpu] != vcpu);
	last_vcpu[cpu] = vcpu;

	/*
	 * Restore CP0_Wired unconditionally as we clear it after use, and
	 * restore wired guest TLB entries (while in guest context).
	 */
	kvm_restore_gc0_wired(cop0);
	if (current->flags & PF_VCPU) {
		tlbw_use_hazard();
		kvm_vz_vcpu_load_tlb(vcpu, cpu);
		kvm_vz_vcpu_load_wired(vcpu);
	}

	/*
	 * Restore timer state regardless, as e.g. Cause.TI can change over
	 * time if left unmaintained.
	 */
	kvm_vz_restore_timer(vcpu);

	/* Set MC bit if we want to trace guest mode changes */
	if (kvm_trace_guest_mode_change)
		set_c0_guestctl0(MIPS_GCTL0_MC);
	else
		clear_c0_guestctl0(MIPS_GCTL0_MC);

	/* Don't bother restoring registers multiple times unless necessary */
	if (!all)
		return 0;

	/*
	 * Restore config registers first, as some implementations restrict
	 * writes to other registers when the corresponding feature bits aren't
	 * set. For example Status.CU1 cannot be set unless Config1.FP is set.
	 */
	kvm_restore_gc0_config(cop0);
	if (cpu_guest_has_conf1)
		kvm_restore_gc0_config1(cop0);
	if (cpu_guest_has_conf2)
		kvm_restore_gc0_config2(cop0);
	if (cpu_guest_has_conf3)
		kvm_restore_gc0_config3(cop0);
	if (cpu_guest_has_conf4)
		kvm_restore_gc0_config4(cop0);
	if (cpu_guest_has_conf5)
		kvm_restore_gc0_config5(cop0);
	if (cpu_guest_has_conf6)
		kvm_restore_gc0_config6(cop0);
	if (cpu_guest_has_conf7)
		kvm_restore_gc0_config7(cop0);

	kvm_restore_gc0_index(cop0);
	kvm_restore_gc0_entrylo0(cop0);
	kvm_restore_gc0_entrylo1(cop0);
	kvm_restore_gc0_context(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_restore_gc0_contextconfig(cop0);
#ifdef CONFIG_64BIT
	kvm_restore_gc0_xcontext(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_restore_gc0_xcontextconfig(cop0);
#endif
	kvm_restore_gc0_pagemask(cop0);
	kvm_restore_gc0_pagegrain(cop0);
	kvm_restore_gc0_hwrena(cop0);
	kvm_restore_gc0_badvaddr(cop0);
	kvm_restore_gc0_entryhi(cop0);
	kvm_restore_gc0_status(cop0);
	kvm_restore_gc0_intctl(cop0);
	kvm_restore_gc0_epc(cop0);
	kvm_vz_write_gc0_ebase(kvm_read_sw_gc0_ebase(cop0));
	if (cpu_guest_has_userlocal)
		kvm_restore_gc0_userlocal(cop0);

	kvm_restore_gc0_errorepc(cop0);

	/* restore KScratch registers if enabled in guest */
	if (cpu_guest_has_conf4) {
		if (cpu_guest_has_kscr(2))
			kvm_restore_gc0_kscratch1(cop0);
		if (cpu_guest_has_kscr(3))
			kvm_restore_gc0_kscratch2(cop0);
		if (cpu_guest_has_kscr(4))
			kvm_restore_gc0_kscratch3(cop0);
		if (cpu_guest_has_kscr(5))
			kvm_restore_gc0_kscratch4(cop0);
		if (cpu_guest_has_kscr(6))
			kvm_restore_gc0_kscratch5(cop0);
		if (cpu_guest_has_kscr(7))
			kvm_restore_gc0_kscratch6(cop0);
	}

	if (cpu_guest_has_badinstr)
		kvm_restore_gc0_badinstr(cop0);
	if (cpu_guest_has_badinstrp)
		kvm_restore_gc0_badinstrp(cop0);

	if (cpu_guest_has_segments) {
		kvm_restore_gc0_segctl0(cop0);
		kvm_restore_gc0_segctl1(cop0);
		kvm_restore_gc0_segctl2(cop0);
	}

	/* restore HTW registers */
	if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
		kvm_restore_gc0_pwbase(cop0);
		kvm_restore_gc0_pwfield(cop0);
		kvm_restore_gc0_pwsize(cop0);
		kvm_restore_gc0_pwctl(cop0);
	}

	/* restore Root.GuestCtl2 from unused Guest guestctl2 register */
	if (cpu_has_guestctl2)
		write_c0_guestctl2(
			cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]);

	/*
	 * We should clear the linked load bit to break interrupted atomics.
	 * This prevents an SC on the next VCPU from succeeding by matching
	 * an LL on the previous VCPU.
	 */
	if (vcpu->kvm->created_vcpus > 1)
		write_gc0_lladdr(0);

	return 0;
}

static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	if (current->flags & PF_VCPU)
		kvm_vz_vcpu_save_wired(vcpu);

	kvm_lose_fpu(vcpu);

	kvm_save_gc0_index(cop0);
	kvm_save_gc0_entrylo0(cop0);
	kvm_save_gc0_entrylo1(cop0);
	kvm_save_gc0_context(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_save_gc0_contextconfig(cop0);
#ifdef CONFIG_64BIT
	kvm_save_gc0_xcontext(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_save_gc0_xcontextconfig(cop0);
#endif
	kvm_save_gc0_pagemask(cop0);
	kvm_save_gc0_pagegrain(cop0);
	kvm_save_gc0_wired(cop0);
	/* allow wired TLB entries to be overwritten */
	clear_gc0_wired(MIPSR6_WIRED_WIRED);
	kvm_save_gc0_hwrena(cop0);
	kvm_save_gc0_badvaddr(cop0);
	kvm_save_gc0_entryhi(cop0);
	kvm_save_gc0_status(cop0);
	kvm_save_gc0_intctl(cop0);
	kvm_save_gc0_epc(cop0);
	kvm_write_sw_gc0_ebase(cop0, kvm_vz_read_gc0_ebase());
	if (cpu_guest_has_userlocal)
		kvm_save_gc0_userlocal(cop0);

	/* only save implemented config registers */
	kvm_save_gc0_config(cop0);
	if (cpu_guest_has_conf1)
		kvm_save_gc0_config1(cop0);
	if (cpu_guest_has_conf2)
		kvm_save_gc0_config2(cop0);
	if (cpu_guest_has_conf3)
		kvm_save_gc0_config3(cop0);
	if (cpu_guest_has_conf4)
		kvm_save_gc0_config4(cop0);
	if (cpu_guest_has_conf5)
		kvm_save_gc0_config5(cop0);
	if (cpu_guest_has_conf6)
		kvm_save_gc0_config6(cop0);
	if (cpu_guest_has_conf7)
		kvm_save_gc0_config7(cop0);

	kvm_save_gc0_errorepc(cop0);

	/* save KScratch registers if enabled in guest */
	if (cpu_guest_has_conf4) {
		if (cpu_guest_has_kscr(2))
			kvm_save_gc0_kscratch1(cop0);
		if (cpu_guest_has_kscr(3))
			kvm_save_gc0_kscratch2(cop0);
		if (cpu_guest_has_kscr(4))
			kvm_save_gc0_kscratch3(cop0);
		if (cpu_guest_has_kscr(5))
			kvm_save_gc0_kscratch4(cop0);
		if (cpu_guest_has_kscr(6))
			kvm_save_gc0_kscratch5(cop0);
		if (cpu_guest_has_kscr(7))
			kvm_save_gc0_kscratch6(cop0);
	}

	if (cpu_guest_has_badinstr)
		kvm_save_gc0_badinstr(cop0);
	if (cpu_guest_has_badinstrp)
		kvm_save_gc0_badinstrp(cop0);

	if (cpu_guest_has_segments) {
		kvm_save_gc0_segctl0(cop0);
		kvm_save_gc0_segctl1(cop0);
		kvm_save_gc0_segctl2(cop0);
	}

	/* save HTW registers if enabled in guest */
	if (cpu_guest_has_ldpte || (cpu_guest_has_htw &&
	    kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW)) {
		kvm_save_gc0_pwbase(cop0);
		kvm_save_gc0_pwfield(cop0);
		kvm_save_gc0_pwsize(cop0);
		kvm_save_gc0_pwctl(cop0);
	}

	kvm_vz_save_timer(vcpu);

	/* save Root.GuestCtl2 in unused Guest guestctl2 register */
	if (cpu_has_guestctl2)
		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] =
			read_c0_guestctl2();

	return 0;
}

/**
 * kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB.
 * @size:	Number of guest VTLB entries (0 < @size <= root VTLB entries).
 *
 * Attempt to resize the guest VTLB by writing guest Config registers. This is
 * necessary for cores with a shared root/guest TLB to avoid overlap with wired
 * entries in the root VTLB.
 *
 * Returns:	The resulting guest VTLB size.
 */
static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size)
{
	unsigned int config4 = 0, ret = 0, limit;

	/* Write MMUSize - 1 into guest Config registers */
	if (cpu_guest_has_conf1)
		change_gc0_config1(MIPS_CONF1_TLBS,
				   (size - 1) << MIPS_CONF1_TLBS_SHIFT);
	if (cpu_guest_has_conf4) {
		config4 = read_gc0_config4();
		if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
		    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) {
			config4 &= ~MIPS_CONF4_VTLBSIZEEXT;
			config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
				MIPS_CONF4_VTLBSIZEEXT_SHIFT;
		} else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
			   MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) {
			config4 &= ~MIPS_CONF4_MMUSIZEEXT;
			config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
				MIPS_CONF4_MMUSIZEEXT_SHIFT;
		}
		write_gc0_config4(config4);
	}

	/*
	 * Set Guest.Wired.Limit = 0 (no limit up to Guest.MMUSize-1), unless it
	 * would exceed Root.Wired.Limit (also clearing Guest.Wired.Wired so the
	 * write is not dropped).
	 */
	if (cpu_has_mips_r6) {
		limit = (read_c0_wired() & MIPSR6_WIRED_LIMIT) >>
			MIPSR6_WIRED_LIMIT_SHIFT;
		if (size - 1 <= limit)
			limit = 0;
		write_gc0_wired(limit << MIPSR6_WIRED_LIMIT_SHIFT);
	}

	/* Read back MMUSize - 1 */
	back_to_back_c0_hazard();
	if (cpu_guest_has_conf1)
		ret = (read_gc0_config1() & MIPS_CONF1_TLBS) >>
			MIPS_CONF1_TLBS_SHIFT;
	if (config4) {
		if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
		    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT)
			ret |= ((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
				MIPS_CONF4_VTLBSIZEEXT_SHIFT) <<
				MIPS_CONF1_TLBS_SIZE;
		else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
			 MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT)
			ret |= ((config4 & MIPS_CONF4_MMUSIZEEXT) >>
				MIPS_CONF4_MMUSIZEEXT_SHIFT) <<
				MIPS_CONF1_TLBS_SIZE;
	}
	return ret + 1;
}
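
/*
 * Worked example of the size encoding above: for size == 128 the field
 * value is size - 1 == 127. The low MIPS_CONF1_TLBS_SIZE (6) bits, 0x3f,
 * land in Config1.MMUSize, and the remaining high bit goes to
 * Config4.VTLBSizeExt (or MMUSizeExt, depending on r6 and MMUExtDef as
 * coded above). The read-back path reassembles 0x3f | (1 << 6) == 127 and
 * returns 128, which may be smaller than requested if the core clamped
 * either field.
 */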

static int kvm_vz_hardware_enable(void)
{
	unsigned int mmu_size, guest_mmu_size, ftlb_size;
	u64 guest_cvmctl, cvmvmconfig;

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON3:
		/* Set up guest timer/perfcount IRQ lines */
		guest_cvmctl = read_gc0_cvmctl();
		guest_cvmctl &= ~CVMCTL_IPTI;
		guest_cvmctl |= 7ull << CVMCTL_IPTI_SHIFT;
		guest_cvmctl &= ~CVMCTL_IPPCI;
		guest_cvmctl |= 6ull << CVMCTL_IPPCI_SHIFT;
		write_gc0_cvmctl(guest_cvmctl);

		cvmvmconfig = read_c0_cvmvmconfig();
		/* No I/O hole translation. */
		cvmvmconfig |= CVMVMCONF_DGHT;
		/* Halve the root MMU size */
		mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
			    >> CVMVMCONF_MMUSIZEM1_S) + 1;
		guest_mmu_size = mmu_size / 2;
		mmu_size -= guest_mmu_size;
		cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
		cvmvmconfig |= mmu_size - 1;
		write_c0_cvmvmconfig(cvmvmconfig);

		/* Update our records */
		current_cpu_data.tlbsize = mmu_size;
		current_cpu_data.tlbsizevtlb = mmu_size;
		current_cpu_data.guest.tlbsize = guest_mmu_size;

		/* Flush moved entries in new (guest) context */
		kvm_vz_local_flush_guesttlb_all();
		break;
	default:
		/*
		 * ImgTec cores tend to use a shared root/guest TLB. To avoid
		 * overlap of root wired and guest entries, the guest TLB may
		 * need resizing.
		 */
		mmu_size = current_cpu_data.tlbsizevtlb;
		ftlb_size = current_cpu_data.tlbsize - mmu_size;

		/* Try switching to maximum guest VTLB size for flush */
		guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size);
		current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
		kvm_vz_local_flush_guesttlb_all();

		/*
		 * Reduce to make space for root wired entries and at least 2
		 * root non-wired entries. This does assume that long-term wired
		 * entries won't be added later.
		 */
		guest_mmu_size = mmu_size - num_wired_entries() - 2;
		guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size);
		current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;

		/*
		 * Write the VTLB size, but if another CPU has already written,
		 * check it matches or we won't provide a consistent view to the
		 * guest. If this ever happens it suggests an asymmetric number
		 * of wired entries.
		 */
		if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) &&
		    WARN(guest_mmu_size != kvm_vz_guest_vtlb_size,
			 "Available guest VTLB size mismatch"))
			return -EINVAL;
		break;
	}

	/*
	 * Enable virtualization features granting guest direct control of
	 * certain features:
	 * CP0=1:	Guest coprocessor 0 context.
	 * AT=Guest:	Guest MMU.
	 * CG=1:	Hit (virtual address) CACHE operations (optional).
	 * CF=1:	Guest Config registers.
	 * CGI=1:	Indexed flush CACHE operations (optional).
	 */
	write_c0_guestctl0(MIPS_GCTL0_CP0 |
			   (MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
			   MIPS_GCTL0_CG | MIPS_GCTL0_CF);
	if (cpu_has_guestctl0ext) {
		if (current_cpu_type() != CPU_LOONGSON64)
			set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
		else
			clear_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
	}

	if (cpu_has_guestid) {
		write_c0_guestctl1(0);
		kvm_vz_local_flush_roottlb_all_guests();

		GUESTID_MASK = current_cpu_data.guestid_mask;
		GUESTID_FIRST_VERSION = GUESTID_MASK + 1;
		GUESTID_VERSION_MASK = ~GUESTID_MASK;

		current_cpu_data.guestid_cache = GUESTID_FIRST_VERSION;
	}

	/* clear any pending injected virtual guest interrupts */
	if (cpu_has_guestctl2)
		clear_c0_guestctl2(0x3f << 10);

#ifdef CONFIG_CPU_LOONGSON64
	/* Control guest CCA attribute */
	if (cpu_has_csr())
		csr_writel(csr_readl(0xffffffec) | 0x1, 0xffffffec);
#endif

	return 0;
}
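
/*
 * Numeric example of the resizing above: an Octeon III starting with a
 * 256-entry MMU ends up with 128 entries owned by the guest and 128 left
 * for root. On the shared-TLB path, a 64-entry root VTLB with 6 wired
 * entries is first grown to the full 64 for flushing, then trimmed to
 * 64 - 6 - 2 == 56 guest VTLB entries, leaving room for the root wired
 * entries plus at least two non-wired ones.
 */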

static void kvm_vz_hardware_disable(void)
{
	u64 cvmvmconfig;
	unsigned int mmu_size;

	/* Flush any remaining guest TLB entries */
	kvm_vz_local_flush_guesttlb_all();

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON3:
		/*
		 * Allocate whole TLB for root. Existing guest TLB entries will
		 * change ownership to the root TLB. We should be safe though as
		 * they've already been flushed above while in guest TLB.
		 */
		cvmvmconfig = read_c0_cvmvmconfig();
		mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
			    >> CVMVMCONF_MMUSIZEM1_S) + 1;
		cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
		cvmvmconfig |= mmu_size - 1;
		write_c0_cvmvmconfig(cvmvmconfig);

		/* Update our records */
		current_cpu_data.tlbsize = mmu_size;
		current_cpu_data.tlbsizevtlb = mmu_size;
		current_cpu_data.guest.tlbsize = 0;

		/* Flush moved entries in new (root) context */
		local_flush_tlb_all();
		break;
	}

	if (cpu_has_guestid) {
		write_c0_guestctl1(0);
		kvm_vz_local_flush_roottlb_all_guests();
	}
}

static int kvm_vz_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_MIPS_VZ:
		/* we wouldn't be here unless cpu_has_vz */
		r = 1;
		break;
#ifdef CONFIG_64BIT
	case KVM_CAP_MIPS_64BIT:
		/* We support 64-bit registers/operations and addresses */
		r = 2;
		break;
#endif
	case KVM_CAP_IOEVENTFD:
		r = 1;
		break;
	default:
		r = 0;
		break;
	}

	return r;
}
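
/*
 * Illustrative userspace sketch (an assumption for documentation, not
 * part of this file or of the kernel build): probing the capability
 * answered above via KVM_CHECK_EXTENSION, using only the documented KVM
 * ioctl interface and the KVM_CAP_MIPS_VZ constant from <linux/kvm.h>.
 */
#if 0	/* example only, never compiled */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int kvm_has_vz(void)
{
	int fd = open("/dev/kvm", O_RDWR);
	int r;

	if (fd < 0)
		return 0;
	/* Matches the switch above: 1 if VZ is supported, 0 otherwise */
	r = ioctl(fd, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_VZ);
	close(fd);
	return r > 0;
}
#endif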

static int kvm_vz_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;

	for_each_possible_cpu(i)
		vcpu->arch.vzguestid[i] = 0;

	return 0;
}

static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	int cpu;

	/*
	 * If the VCPU is freed and reused as another VCPU, we don't want the
	 * matching pointer wrongly hanging around in last_vcpu[] or
	 * last_exec_vcpu[].
	 */
	for_each_possible_cpu(cpu) {
		if (last_vcpu[cpu] == vcpu)
			last_vcpu[cpu] = NULL;
		if (last_exec_vcpu[cpu] == vcpu)
			last_exec_vcpu[cpu] = NULL;
	}
}

static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */

	/*
	 * Start off the timer at the same frequency as the host timer, but the
	 * soft timer doesn't handle frequencies greater than 1GHz yet.
	 */
	if (mips_hpt_frequency && mips_hpt_frequency <= NSEC_PER_SEC)
		count_hz = mips_hpt_frequency;
	kvm_mips_init_count(vcpu, count_hz);

	/*
	 * Initialize guest register state to valid architectural reset state.
	 */

	/* PageGrain */
	if (cpu_has_mips_r5 || cpu_has_mips_r6)
		kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC);
	/* Wired */
	if (cpu_has_mips_r6)
		kvm_write_sw_gc0_wired(cop0,
				       read_gc0_wired() & MIPSR6_WIRED_LIMIT);
	/* Status */
	kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL);
	if (cpu_has_mips_r5 || cpu_has_mips_r6)
		kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status());
	/* IntCtl */
	kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() &
				(INTCTLF_IPFDC | INTCTLF_IPPCI | INTCTLF_IPTI));
	/* PRId */
	kvm_write_sw_gc0_prid(cop0, boot_cpu_data.processor_id);
	/* EBase */
	kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id);
	/* Config */
	kvm_save_gc0_config(cop0);
	/* architecturally writable (e.g. from guest) */
	kvm_change_sw_gc0_config(cop0, CONF_CM_CMASK,
				 _page_cachable_default >> _CACHE_SHIFT);
	/* architecturally read only, but maybe writable from root */
	kvm_change_sw_gc0_config(cop0, MIPS_CONF_MT, read_c0_config());
	if (cpu_guest_has_conf1) {
		kvm_set_sw_gc0_config(cop0, MIPS_CONF_M);
		/* Config1 */
		kvm_save_gc0_config1(cop0);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config1(cop0, MIPS_CONF1_C2 |
					 MIPS_CONF1_MD |
					 MIPS_CONF1_PC |
					 MIPS_CONF1_WR |
					 MIPS_CONF1_CA |
					 MIPS_CONF1_FP);
	}
	if (cpu_guest_has_conf2) {
		kvm_set_sw_gc0_config1(cop0, MIPS_CONF_M);
		/* Config2 */
		kvm_save_gc0_config2(cop0);
	}
	if (cpu_guest_has_conf3) {
		kvm_set_sw_gc0_config2(cop0, MIPS_CONF_M);
		/* Config3 */
		kvm_save_gc0_config3(cop0);
		/* architecturally writable (e.g. from guest) */
		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_ISA_OE);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_MSA |
					 MIPS_CONF3_BPG |
					 MIPS_CONF3_ULRI |
					 MIPS_CONF3_DSP |
					 MIPS_CONF3_CTXTC |
					 MIPS_CONF3_ITL |
					 MIPS_CONF3_LPA |
					 MIPS_CONF3_VEIC |
					 MIPS_CONF3_VINT |
					 MIPS_CONF3_SP |
					 MIPS_CONF3_CDMM |
					 MIPS_CONF3_MT |
					 MIPS_CONF3_SM |
					 MIPS_CONF3_TL);
	}
	if (cpu_guest_has_conf4) {
		kvm_set_sw_gc0_config3(cop0, MIPS_CONF_M);
		/* Config4 */
		kvm_save_gc0_config4(cop0);
	}
	if (cpu_guest_has_conf5) {
		kvm_set_sw_gc0_config4(cop0, MIPS_CONF_M);
		/* Config5 */
		kvm_save_gc0_config5(cop0);
		/* architecturally writable (e.g. from guest) */
		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_K |
					 MIPS_CONF5_CV |
					 MIPS_CONF5_MSAEN |
					 MIPS_CONF5_UFE |
					 MIPS_CONF5_FRE |
					 MIPS_CONF5_SBRI |
					 MIPS_CONF5_UFR);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP);
	}

	if (cpu_guest_has_contextconfig) {
		/* ContextConfig */
		kvm_write_sw_gc0_contextconfig(cop0, 0x007ffff0);
#ifdef CONFIG_64BIT
		/* XContextConfig */
		/* bits SEGBITS-13+3:4 set */
		kvm_write_sw_gc0_xcontextconfig(cop0,
					((1ull << (cpu_vmbits - 13)) - 1) << 4);
#endif
	}

	/* Implementation dependent, use the legacy layout */
	if (cpu_guest_has_segments) {
		/* SegCtl0, SegCtl1, SegCtl2 */
		kvm_write_sw_gc0_segctl0(cop0, 0x00200010);
		kvm_write_sw_gc0_segctl1(cop0, 0x00000002 |
				(_page_cachable_default >> _CACHE_SHIFT) <<
						(16 + MIPS_SEGCFG_C_SHIFT));
		kvm_write_sw_gc0_segctl2(cop0, 0x00380438);
	}

	/* reset HTW registers */
	if (cpu_guest_has_htw && (cpu_has_mips_r5 || cpu_has_mips_r6)) {
		/* PWField */
		kvm_write_sw_gc0_pwfield(cop0, 0x0c30c302);
		/* PWSize */
		kvm_write_sw_gc0_pwsize(cop0, 1 << MIPS_PWSIZE_PTW_SHIFT);
	}

	/* start with no pending virtual guest interrupts */
	if (cpu_has_guestctl2)
		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0;

	/* Put PC at reset vector */
	vcpu->arch.pc = CKSEG1ADDR(0x1fc00000);

	return 0;
}
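
/*
 * Concrete reset-state example for the setup above: on a host with
 * mips_hpt_frequency == 200000000 the guest CP0_Count ticks at 200 MHz,
 * while a host clocked above 1 GHz falls back to the 100 MHz default.
 * For vcpu_id 3 the software EBase becomes 0x80000003, i.e. the standard
 * exception base with the VCPU number in the low (CPUNum) bits, and
 * execution starts at the architected reset vector
 * CKSEG1ADDR(0x1fc00000) == 0xbfc00000.
 */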

static void kvm_vz_flush_shadow_all(struct kvm *kvm)
{
	if (cpu_has_guestid) {
		/* Flush GuestID for each VCPU individually */
		kvm_flush_remote_tlbs(kvm);
	} else {
		/*
		 * For each CPU there is a single GPA ASID used by all VCPUs in
		 * the VM, so it doesn't make sense for the VCPUs to handle
		 * invalidation of these ASIDs individually.
		 *
		 * Instead mark all CPUs as needing ASID invalidation in
		 * asid_flush_mask, and just use kvm_flush_remote_tlbs(kvm) to
		 * kick any running VCPUs so they check asid_flush_mask.
		 */
		cpumask_setall(&kvm->arch.asid_flush_mask);
		kvm_flush_remote_tlbs(kvm);
	}
}

static void kvm_vz_flush_shadow_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *slot)
{
	kvm_vz_flush_shadow_all(kvm);
}

static void kvm_vz_vcpu_reenter(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int preserve_guest_tlb;

	preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu);

	if (preserve_guest_tlb)
		kvm_vz_vcpu_save_wired(vcpu);

	kvm_vz_vcpu_load_tlb(vcpu, cpu);

	if (preserve_guest_tlb)
		kvm_vz_vcpu_load_wired(vcpu);
}

static int kvm_vz_vcpu_run(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int r;

	kvm_vz_acquire_htimer(vcpu);
	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu, read_gc0_cause());

	kvm_vz_check_requests(vcpu, cpu);
	kvm_vz_vcpu_load_tlb(vcpu, cpu);
	kvm_vz_vcpu_load_wired(vcpu);

	r = vcpu->arch.vcpu_run(vcpu);

	kvm_vz_vcpu_save_wired(vcpu);

	return r;
}

static struct kvm_mips_callbacks kvm_vz_callbacks = {
	.handle_cop_unusable = kvm_trap_vz_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_vz_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_vz_handle_tlb_ld_miss,
	.handle_tlb_st_miss = kvm_trap_vz_handle_tlb_st_miss,
	.handle_addr_err_st = kvm_trap_vz_no_handler,
	.handle_addr_err_ld = kvm_trap_vz_no_handler,
	.handle_syscall = kvm_trap_vz_no_handler,
	.handle_res_inst = kvm_trap_vz_no_handler,
	.handle_break = kvm_trap_vz_no_handler,
	.handle_msa_disabled = kvm_trap_vz_handle_msa_disabled,
	.handle_guest_exit = kvm_trap_vz_handle_guest_exit,

	.hardware_enable = kvm_vz_hardware_enable,
	.hardware_disable = kvm_vz_hardware_disable,
	.check_extension = kvm_vz_check_extension,
	.vcpu_init = kvm_vz_vcpu_init,
	.vcpu_uninit = kvm_vz_vcpu_uninit,
	.vcpu_setup = kvm_vz_vcpu_setup,
	.flush_shadow_all = kvm_vz_flush_shadow_all,
	.flush_shadow_memslot = kvm_vz_flush_shadow_memslot,
	.gva_to_gpa = kvm_vz_gva_to_gpa_cb,
	.queue_timer_int = kvm_vz_queue_timer_int_cb,
	.dequeue_timer_int = kvm_vz_dequeue_timer_int_cb,
	.queue_io_int = kvm_vz_queue_io_int_cb,
	.dequeue_io_int = kvm_vz_dequeue_io_int_cb,
	.irq_deliver = kvm_vz_irq_deliver_cb,
	.irq_clear = kvm_vz_irq_clear_cb,
	.num_regs = kvm_vz_num_regs,
	.copy_reg_indices = kvm_vz_copy_reg_indices,
	.get_one_reg = kvm_vz_get_one_reg,
	.set_one_reg = kvm_vz_set_one_reg,
	.vcpu_load = kvm_vz_vcpu_load,
	.vcpu_put = kvm_vz_vcpu_put,
	.vcpu_run = kvm_vz_vcpu_run,
	.vcpu_reenter = kvm_vz_vcpu_reenter,
};

int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	if (!cpu_has_vz)
		return -ENODEV;

	/*
	 * VZ requires at least 2 KScratch registers, so it should have been
	 * possible to allocate pgd_reg.
	 */
	if (WARN(pgd_reg == -1,
		 "pgd_reg not allocated even though cpu_has_vz\n"))
		return -ENODEV;

	pr_info("Starting KVM with MIPS VZ extensions\n");

	*install_callbacks = &kvm_vz_callbacks;
	return 0;
}