/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_HOST_H__
#define __POWERPC_KVM_HOST_H__

#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/kvm_para.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <asm/kvm_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/hvcall.h>
#include <asm/mce.h>

#define KVM_MAX_VCPUS		NR_CPUS
#define KVM_MAX_VCORES		NR_CPUS
#define KVM_USER_MEM_SLOTS	512

#include <asm/cputhreads.h>

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
#include <asm/kvm_book3s_asm.h>		/* for MAX_SMT_THREADS */
#define KVM_MAX_VCPU_ID		(MAX_SMT_THREADS * KVM_MAX_VCORES)
#define KVM_MAX_NESTED_GUESTS	KVMPPC_NR_LPIDS

#else
#define KVM_MAX_VCPU_ID		KVM_MAX_VCPUS
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_HALT_POLL_NS_DEFAULT 10000	/* 10 us */

/* These values are internal and can be increased later */
#define KVM_NR_IRQCHIPS		1
#define KVM_IRQCHIP_NUM_PINS	256

/* PPC-specific vcpu->requests bit members */
#define KVM_REQ_WATCHDOG	KVM_ARCH_REQ(0)
#define KVM_REQ_EPR_EXIT	KVM_ARCH_REQ(1)

#include <linux/mmu_notifier.h>

#define KVM_ARCH_WANT_MMU_NOTIFIER

extern int kvm_unmap_hva_range(struct kvm *kvm,
			       unsigned long start, unsigned long end,
			       unsigned flags);
extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);

#define HPTEG_CACHE_NUM			(1 << 15)
#define HPTEG_HASH_BITS_PTE		13
#define HPTEG_HASH_BITS_PTE_LONG	12
#define HPTEG_HASH_BITS_VPTE		13
#define HPTEG_HASH_BITS_VPTE_LONG	5
#define HPTEG_HASH_BITS_VPTE_64K	11
#define HPTEG_HASH_NUM_PTE		(1 << HPTEG_HASH_BITS_PTE)
#define HPTEG_HASH_NUM_PTE_LONG		(1 << HPTEG_HASH_BITS_PTE_LONG)
#define HPTEG_HASH_NUM_VPTE		(1 << HPTEG_HASH_BITS_VPTE)
#define HPTEG_HASH_NUM_VPTE_LONG	(1 << HPTEG_HASH_BITS_VPTE_LONG)
#define HPTEG_HASH_NUM_VPTE_64K		(1 << HPTEG_HASH_BITS_VPTE_64K)

/* Physical Address Mask - allowed range of real mode RAM access */
#define KVM_PAM			0x0fffffffffffffffULL

struct lppaca;
struct slb_shadow;
struct dtl_entry;

struct kvmppc_vcpu_book3s;
struct kvmppc_book3s_shadow_vcpu;
struct kvm_nested_guest;

struct kvm_vm_stat {
	ulong remote_tlb_flush;
	ulong num_2M_pages;
	ulong num_1G_pages;
};

struct kvm_vcpu_stat {
	u64 sum_exits;
	u64 mmio_exits;
	u64 signal_exits;
	u64 light_exits;
	/* Account for special types of light exits: */
	u64 itlb_real_miss_exits;
	u64 itlb_virt_miss_exits;
	u64 dtlb_real_miss_exits;
	u64 dtlb_virt_miss_exits;
	u64 syscall_exits;
	u64 isi_exits;
	u64 dsi_exits;
	u64 emulated_inst_exits;
	u64 dec_exits;
	u64 ext_intr_exits;
	u64 halt_poll_success_ns;
	u64 halt_poll_fail_ns;
	u64 halt_wait_ns;
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_successful_wait;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 dbell_exits;
	u64 gdbell_exits;
	u64 ld;
	u64 st;
#ifdef CONFIG_PPC_BOOK3S
	u64 pf_storage;
	u64 pf_instruc;
	u64 sp_storage;
	u64 sp_instruc;
	u64 queue_intr;
	u64 ld_slow;
	u64 st_slow;
#endif
	u64 pthru_all;
	u64 pthru_host;
	u64 pthru_bad_aff;
};

enum kvm_exit_types {
	MMIO_EXITS,
	SIGNAL_EXITS,
	ITLB_REAL_MISS_EXITS,
	ITLB_VIRT_MISS_EXITS,
	DTLB_REAL_MISS_EXITS,
	DTLB_VIRT_MISS_EXITS,
	SYSCALL_EXITS,
	ISI_EXITS,
	DSI_EXITS,
	EMULATED_INST_EXITS,
	EMULATED_MTMSRWE_EXITS,
	EMULATED_WRTEE_EXITS,
	EMULATED_MTSPR_EXITS,
	EMULATED_MFSPR_EXITS,
	EMULATED_MTMSR_EXITS,
	EMULATED_MFMSR_EXITS,
	EMULATED_TLBSX_EXITS,
	EMULATED_TLBWE_EXITS,
	EMULATED_RFI_EXITS,
	EMULATED_RFCI_EXITS,
	EMULATED_RFDI_EXITS,
	DEC_EXITS,
	EXT_INTR_EXITS,
	HALT_WAKEUP,
	USR_PR_INST,
	FP_UNAVAIL,
	DEBUG_EXITS,
	TIMEINGUEST,
	DBELL_EXITS,
	GDBELL_EXITS,
	__NUMBER_OF_KVM_EXIT_TYPES
};

/* allow access to big endian 32bit upper/lower parts and 64bit var */
struct kvmppc_exit_timing {
	union {
		u64 tv64;
		struct {
			u32 tbu, tbl;
		} tv32;
	};
};
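
/*
 * Illustrative use (a sketch, not an API provided by this header): on a
 * big-endian kernel the same storage can be written as the full 64-bit
 * timebase value and read back as its 32-bit halves, e.g.
 *
 *	struct kvmppc_exit_timing t;
 *	u32 upper, lower;
 *
 *	t.tv64 = mftb();
 *	upper = t.tv32.tbu;
 *	lower = t.tv32.tbl;
 */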

struct kvmppc_pginfo {
	unsigned long pfn;
	atomic_t refcnt;
};

struct kvmppc_spapr_tce_iommu_table {
	struct rcu_head rcu;
	struct list_head next;
	struct iommu_table *tbl;
	struct kref kref;
};

#define TCES_PER_PAGE	(PAGE_SIZE / sizeof(u64))

struct kvmppc_spapr_tce_table {
	struct list_head list;
	struct kvm *kvm;
	u64 liobn;
	struct rcu_head rcu;
	u32 page_shift;
	u64 offset;		/* in pages */
	u64 size;		/* window size in pages */
	struct list_head iommu_tables;
	struct mutex alloc_lock;
	struct page *pages[0];
};

/* XICS components, defined in book3s_xics.c */
struct kvmppc_xics;
struct kvmppc_icp;
extern struct kvm_device_ops kvm_xics_ops;

/* XIVE components, defined in book3s_xive.c */
struct kvmppc_xive;
struct kvmppc_xive_vcpu;
extern struct kvm_device_ops kvm_xive_ops;
extern struct kvm_device_ops kvm_xive_native_ops;

struct kvmppc_passthru_irqmap;

/*
 * The reverse mapping array has one entry for each HPTE,
 * which stores the guest's view of the second word of the HPTE
 * (including the guest physical address of the mapping),
 * plus forward and backward pointers in a doubly-linked ring
 * of HPTEs that map the same host page.  The pointers in this
 * ring are 32-bit HPTE indexes, to save space.
 */
struct revmap_entry {
	unsigned long guest_rpte;
	unsigned int forw, back;
};
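
/*
 * Illustrative ring walk (a sketch; examine() is a hypothetical callback,
 * not something this header defines): starting from HPTE index i, the
 * HPTEs that map the same host page can be visited via the forward links:
 *
 *	unsigned int j = i;
 *	do {
 *		examine(&rev[j]);
 *		j = rev[j].forw;
 *	} while (j != i);
 */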

/*
 * An rmap array, with one entry per guest page, is allocated for each
 * memslot.  This array is used to store usage-specific information about
 * the guest page.  Below are the encodings of the various possible usage
 * types.
 */
/* Free bits which can be used to define a new usage */
#define KVMPPC_RMAP_TYPE_MASK	0xff00000000000000
#define KVMPPC_RMAP_NESTED	0xc000000000000000	/* Nested rmap array */
#define KVMPPC_RMAP_HPT		0x0100000000000000	/* HPT guest */

/*
 * rmap usage definition for a hash page table (hpt) guest:
 * 0x0000080000000000	Lock bit
 * 0x0000018000000000	RC bits
 * 0x0000000100000000	Present bit
 * 0x00000000ffffffff	HPT index bits
 * The bottom 32 bits are the index in the guest HPT of a HPTE that points to
 * the page.
 */
#define KVMPPC_RMAP_LOCK_BIT	43
#define KVMPPC_RMAP_RC_SHIFT	32
#define KVMPPC_RMAP_REFERENCED	(HPTE_R_R << KVMPPC_RMAP_RC_SHIFT)
#define KVMPPC_RMAP_PRESENT	0x100000000ul
#define KVMPPC_RMAP_INDEX	0xfffffffful
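
/*
 * Illustrative decode of an HPT-guest rmap entry (a sketch, not a helper
 * this header provides): given an entry "unsigned long rmap",
 *
 *	unsigned long hpte_index = rmap & KVMPPC_RMAP_INDEX;
 *	bool present = rmap & KVMPPC_RMAP_PRESENT;
 *	bool referenced = rmap & KVMPPC_RMAP_REFERENCED;
 */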

struct kvm_arch_memory_slot {
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	unsigned long *rmap;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
};

struct kvm_hpt_info {
	/* Host virtual (linear mapping) address of guest HPT */
	unsigned long virt;
	/* Array of reverse mapping entries for each guest HPTE */
	struct revmap_entry *rev;
	/* Guest HPT size is 2**(order) bytes */
	u32 order;
	/* 1 if HPT allocated with CMA, 0 otherwise */
	int cma;
};

struct kvm_resize_hpt;

/* Flag values for kvm_arch.secure_guest */
#define KVMPPC_SECURE_INIT_START 0x1 /* H_SVM_INIT_START has been called */
#define KVMPPC_SECURE_INIT_DONE  0x2 /* H_SVM_INIT_DONE completed */
#define KVMPPC_SECURE_INIT_ABORT 0x4 /* H_SVM_INIT_ABORT issued */

struct kvm_arch {
	unsigned int lpid;
	unsigned int smt_mode;		/* # vcpus per virtual core */
	unsigned int emul_smt_mode;	/* emulated SMT mode, on P9 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	unsigned int tlb_sets;
	struct kvm_hpt_info hpt;
	atomic64_t mmio_update;
	unsigned int host_lpid;
	unsigned long host_lpcr;
	unsigned long sdr1;
	unsigned long host_sdr1;
	unsigned long lpcr;
	unsigned long vrma_slb_v;
	int mmu_ready;
	atomic_t vcpus_running;
	u32 online_vcores;
	atomic_t hpte_mod_interest;
	cpumask_t need_tlb_flush;
	cpumask_t cpu_in_guest;
	u8 radix;
	u8 fwnmi_enabled;
	u8 secure_guest;
	u8 svm_enabled;
	bool threads_indep;
	bool nested_enable;
	pgd_t *pgtable;
	u64 process_table;
	struct dentry *debugfs_dir;
	struct kvm_resize_hpt *resize_hpt; /* protected by kvm->lock */
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	struct mutex hpt_mutex;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	struct list_head spapr_tce_tables;
	struct list_head rtas_tokens;
	struct mutex rtas_token_lock;
	DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
#endif
#ifdef CONFIG_KVM_MPIC
	struct openpic *mpic;
#endif
#ifdef CONFIG_KVM_XICS
	struct kvmppc_xics *xics;
	struct kvmppc_xics *xics_device;
	struct kvmppc_xive *xive;	/* Current XIVE device in use */
	struct {
		struct kvmppc_xive *native;
		struct kvmppc_xive *xics_on_xive;
	} xive_devices;
	struct kvmppc_passthru_irqmap *pimap;
#endif
	struct kvmppc_ops *kvm_ops;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	struct mutex uvmem_lock;
	struct list_head uvmem_pfns;
	struct mutex mmu_setup_lock;	/* nests inside vcpu mutexes */
	u64 l1_ptcr;
	int max_nested_lpid;
	struct kvm_nested_guest *nested_guests[KVM_MAX_NESTED_GUESTS];
	/* This array can grow quite large, keep it at the end */
	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
#endif
};

#define VCORE_ENTRY_MAP(vc)	((vc)->entry_exit_map & 0xff)
#define VCORE_EXIT_MAP(vc)	((vc)->entry_exit_map >> 8)
#define VCORE_IS_EXITING(vc)	(VCORE_EXIT_MAP(vc) != 0)

/* This bit is used when a vcore exit is triggered from outside the vcore */
#define VCORE_EXIT_REQ		0x10000

/*
 * Values for vcore_state.
 * Note that these are arranged such that lower values
 * (< VCORE_SLEEPING) don't require stolen time accounting
 * on load/unload, and higher values do.
 */
#define VCORE_INACTIVE	0
#define VCORE_PREEMPT	1
#define VCORE_PIGGYBACK	2
#define VCORE_SLEEPING	3
#define VCORE_RUNNING	4
#define VCORE_EXITING	5
#define VCORE_POLLING	6

/*
 * Struct used to manage memory for a virtual processor area
 * registered by a PAPR guest.  There are three types of area
 * that a guest can register.
 */
struct kvmppc_vpa {
	unsigned long gpa;	/* Current guest phys addr */
	void *pinned_addr;	/* Address in kernel linear mapping */
	void *pinned_end;	/* End of region */
	unsigned long next_gpa;	/* Guest phys addr for update */
	unsigned long len;	/* Number of bytes required */
	u8 update_pending;	/* 1 => update pinned_addr from next_gpa */
	bool dirty;		/* true => area has been modified by kernel */
};

struct kvmppc_pte {
	ulong eaddr;
	u64 vpage;
	ulong raddr;
	bool may_read		: 1;
	bool may_write		: 1;
	bool may_execute	: 1;
	unsigned long wimg;
	unsigned long rc;
	u8 page_size;		/* MMU_PAGE_xxx */
	u8 page_shift;
};

struct kvmppc_mmu {
	/* book3s_64 only */
	void (*slbmte)(struct kvm_vcpu *vcpu, u64 rb, u64 rs);
	u64  (*slbmfee)(struct kvm_vcpu *vcpu, u64 slb_nr);
	u64  (*slbmfev)(struct kvm_vcpu *vcpu, u64 slb_nr);
	int  (*slbfee)(struct kvm_vcpu *vcpu, gva_t eaddr, ulong *ret_slb);
	void (*slbie)(struct kvm_vcpu *vcpu, u64 slb_nr);
	void (*slbia)(struct kvm_vcpu *vcpu);
	/* book3s */
	void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value);
	u32  (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
	int  (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
		      struct kvmppc_pte *pte, bool data, bool iswrite);
	void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
	int  (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
	u64  (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
	bool (*is_dcbz32)(struct kvm_vcpu *vcpu);
};

struct kvmppc_slb {
	u64 esid;
	u64 vsid;
	u64 orige;
	u64 origv;
	bool valid	: 1;
	bool Ks		: 1;
	bool Kp		: 1;
	bool nx		: 1;
	bool large	: 1;	/* PTEs are 16MB */
	bool tb		: 1;	/* 1TB segment */
	bool class	: 1;
	u8 base_page_size;	/* MMU_PAGE_xxx */
};

/* Struct used to accumulate timing information in HV real mode code */
struct kvmhv_tb_accumulator {
	u64	seqcount;	/* used to synchronize access, also count * 2 */
	u64	tb_total;	/* total time in timebase ticks */
	u64	tb_min;		/* min time */
	u64	tb_max;		/* max time */
};
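
/*
 * A minimal sketch of a consistent read (assuming a live accumulator "acc"
 * that real-mode code may be updating concurrently); illustrative only,
 * not a helper defined by this header:
 *
 *	u64 seq, total;
 *	do {
 *		seq = READ_ONCE(acc->seqcount);
 *		total = READ_ONCE(acc->tb_total);
 *	} while ((seq & 1) || READ_ONCE(acc->seqcount) != seq);
 */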

#ifdef CONFIG_PPC_BOOK3S_64
struct kvmppc_irq_map {
	u32	r_hwirq;
	u32	v_hwirq;
	struct irq_desc *desc;
};

#define	KVMPPC_PIRQ_MAPPED	1024
struct kvmppc_passthru_irqmap {
	int n_mapped;
	struct kvmppc_irq_map mapped[KVMPPC_PIRQ_MAPPED];
};
#endif

# ifdef CONFIG_PPC_FSL_BOOK3E
#define KVMPPC_BOOKE_IAC_NUM	2
#define KVMPPC_BOOKE_DAC_NUM	2
# else
#define KVMPPC_BOOKE_IAC_NUM	4
#define KVMPPC_BOOKE_DAC_NUM	2
# endif
#define KVMPPC_BOOKE_MAX_IAC	4
#define KVMPPC_BOOKE_MAX_DAC	2

/* KVMPPC_EPR_USER takes precedence over KVMPPC_EPR_KERNEL */
#define KVMPPC_EPR_NONE		0 /* EPR not supported */
#define KVMPPC_EPR_USER		1 /* exit to userspace to fill EPR */
#define KVMPPC_EPR_KERNEL	2 /* in-kernel irqchip */

#define KVMPPC_IRQ_DEFAULT	0
#define KVMPPC_IRQ_MPIC		1
#define KVMPPC_IRQ_XICS		2 /* Includes a XIVE option */
#define KVMPPC_IRQ_XIVE		3 /* XIVE native exploitation mode */

#define MMIO_HPTE_CACHE_SIZE	4

struct mmio_hpte_cache_entry {
	unsigned long hpte_v;
	unsigned long hpte_r;
	unsigned long rpte;
	unsigned long pte_index;
	unsigned long eaddr;
	unsigned long slb_v;
	long mmio_update;
	unsigned int slb_base_pshift;
};

struct mmio_hpte_cache {
	struct mmio_hpte_cache_entry entry[MMIO_HPTE_CACHE_SIZE];
	unsigned int index;
};

#define KVMPPC_VSX_COPY_NONE		0
#define KVMPPC_VSX_COPY_WORD		1
#define KVMPPC_VSX_COPY_DWORD		2
#define KVMPPC_VSX_COPY_DWORD_LOAD_DUMP	3
#define KVMPPC_VSX_COPY_WORD_LOAD_DUMP	4

#define KVMPPC_VMX_COPY_BYTE		8
#define KVMPPC_VMX_COPY_HWORD		9
#define KVMPPC_VMX_COPY_WORD		10
#define KVMPPC_VMX_COPY_DWORD		11

struct openpic;

/* W0 and W1 of a XIVE thread management context */
union xive_tma_w01 {
	struct {
		u8	nsr;
		u8	cppr;
		u8	ipb;
		u8	lsmfb;
		u8	ack;
		u8	inc;
		u8	age;
		u8	pipr;
	};
	__be64 w01;
};

struct kvm_vcpu_arch {
	ulong host_stack;
	u32 host_pid;
#ifdef CONFIG_PPC_BOOK3S
	struct kvmppc_slb slb[64];
	int slb_max;		/* 1 + index of last valid entry in slb[] */
	int slb_nr;		/* total number of entries in SLB */
	struct kvmppc_mmu mmu;
	struct kvmppc_vcpu_book3s *book3s;
#endif
#ifdef CONFIG_PPC_BOOK3S_32
	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
#endif

	struct pt_regs regs;

	struct thread_fp_state fp;

#ifdef CONFIG_SPE
	ulong evr[32];
	ulong spefscr;
	ulong host_spefscr;
	u64 acc;
#endif
#ifdef CONFIG_ALTIVEC
	struct thread_vr_state vr;
#endif

#ifdef CONFIG_KVM_BOOKE_HV
	u32 host_mas4;
	u32 host_mas6;
	u32 shadow_epcr;
	u32 shadow_msrp;
	u32 eplc;
	u32 epsc;
	u32 oldpir;
#endif

#if defined(CONFIG_BOOKE)
#if defined(CONFIG_KVM_BOOKE_HV) || defined(CONFIG_64BIT)
	u32 epcr;
#endif
#endif

#ifdef CONFIG_PPC_BOOK3S
	/* For Gekko paired singles */
	u32 qpr[32];
#endif

#ifdef CONFIG_PPC_BOOK3S
	ulong tar;
#endif

#ifdef CONFIG_PPC_BOOK3S
	ulong hflags;
	ulong guest_owned_ext;
	ulong purr;
	ulong spurr;
	ulong ic;
	ulong dscr;
	ulong amr;
	ulong uamor;
	ulong iamr;
	u32 ctrl;
	u32 dabrx;
	ulong dabr;
	ulong dawr;
	ulong dawrx;
	ulong ciabr;
	ulong cfar;
	ulong ppr;
	u32 pspb;
	ulong fscr;
	ulong shadow_fscr;
	ulong ebbhr;
	ulong ebbrr;
	ulong bescr;
	ulong csigr;
	ulong tacr;
	ulong tcscr;
	ulong acop;
	ulong wort;
	ulong tid;
	ulong psscr;
	ulong hfscr;
	ulong shadow_srr1;
#endif
	u32 vrsave; /* also USPRG0 */
	u32 mmucr;
	/* shadow_msr is unused for BookE HV */
	ulong shadow_msr;
	ulong csrr0;
	ulong csrr1;
	ulong dsrr0;
	ulong dsrr1;
	ulong mcsrr0;
	ulong mcsrr1;
	ulong mcsr;
	ulong dec;
#ifdef CONFIG_BOOKE
	u32 decar;
#endif
	/* Time base value when we entered the guest */
	u64 entry_tb;
	u64 entry_vtb;
	u64 entry_ic;
	u32 tcr;
	ulong tsr; /* we need to perform set/clr_bits() which requires ulong */
	u32 ivor[64];
	ulong ivpr;
	u32 pvr;

	u32 shadow_pid;
	u32 shadow_pid1;
	u32 pid;
	u32 swap_pid;

	u32 ccr0;
	u32 ccr1;
	u32 dbsr;

	u64 mmcr[4];	/* MMCR0, MMCR1, MMCR2, MMCR3 */
	u64 mmcra;
	u64 mmcrs;
	u32 pmc[8];
	u32 spmc[2];
	u64 siar;
	u64 sdar;
	u64 sier[3];
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	u64 tfhar;
	u64 texasr;
	u64 tfiar;
	u64 orig_texasr;

	u32 cr_tm;
	u64 xer_tm;
	u64 lr_tm;
	u64 ctr_tm;
	u64 amr_tm;
	u64 ppr_tm;
	u64 dscr_tm;
	u64 tar_tm;

	ulong gpr_tm[32];

	struct thread_fp_state fp_tm;

	struct thread_vr_state vr_tm;
	u32 vrsave_tm; /* also USPRG0 */
#endif

#ifdef CONFIG_KVM_EXIT_TIMING
	struct mutex exit_timing_lock;
	struct kvmppc_exit_timing timing_exit;
	struct kvmppc_exit_timing timing_last_enter;
	u32 last_exit_type;
	u32 timing_count_type[__NUMBER_OF_KVM_EXIT_TYPES];
	u64 timing_sum_duration[__NUMBER_OF_KVM_EXIT_TYPES];
	u64 timing_sum_quad_duration[__NUMBER_OF_KVM_EXIT_TYPES];
	u64 timing_min_duration[__NUMBER_OF_KVM_EXIT_TYPES];
	u64 timing_max_duration[__NUMBER_OF_KVM_EXIT_TYPES];
	u64 timing_last_exit;
	struct dentry *debugfs_exit_timing;
#endif

#ifdef CONFIG_PPC_BOOK3S
	ulong fault_dar;
	u32 fault_dsisr;
	unsigned long intr_msr;
	ulong fault_gpa;	/* guest real address of page fault (POWER9) */
#endif

#ifdef CONFIG_BOOKE
	ulong fault_dear;
	ulong fault_esr;
	ulong queued_dear;
	ulong queued_esr;
	spinlock_t wdt_lock;
	struct timer_list wdt_timer;
	u32 tlbcfg[4];
	u32 tlbps[4];
	u32 mmucfg;
	u32 eptcfg;
	u32 epr;
	u64 sprg9;
	u32 pwrmgtcr0;
	u32 crit_save;
	/* guest debug registers */
	struct debug_reg dbg_reg;
#endif
	gpa_t paddr_accessed;
	gva_t vaddr_accessed;
	pgd_t *pgdir;

	u16 io_gpr; /* GPR used as IO source/target */
	u8 mmio_host_swabbed;
	u8 mmio_sign_extend;
	/* conversion between single and double precision */
	u8 mmio_sp64_extend;
	/*
	 * Number of accesses used to simulate a VSX load/store.
	 * If we use two 8-byte accesses to simulate one 16-byte access,
	 * the number should be 2 and mmio_copy_type = KVMPPC_VSX_COPY_DWORD.
	 * If we use four 4-byte accesses to simulate one 16-byte access,
	 * the number should be 4 and mmio_copy_type = KVMPPC_VSX_COPY_WORD.
	 */
	u8 mmio_vsx_copy_nums;
	u8 mmio_vsx_offset;
	u8 mmio_vmx_copy_nums;
	u8 mmio_vmx_offset;
	u8 mmio_copy_type;
	u8 osi_needed;
	u8 osi_enabled;
	u8 papr_enabled;
	u8 watchdog_enabled;
	u8 sane;
	u8 cpu_type;
	u8 hcall_needed;
	u8 epr_flags; /* KVMPPC_EPR_xxx */
	u8 epr_needed;
	u8 external_oneshot;	/* clear external irq after delivery */

	u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */

	struct hrtimer dec_timer;
	u64 dec_jiffies;
	u64 dec_expires;
	unsigned long pending_exceptions;
	u8 ceded;
	u8 prodded;
	u8 doorbell_request;
	u8 irq_pending; /* Used by XIVE to signal pending guest irqs */
	u32 last_inst;

	struct rcuwait *waitp;
	struct kvmppc_vcore *vcore;
	int ret;
	int trap;
	int state;
	int ptid;
	int thread_cpu;
	int prev_cpu;
	bool timer_running;
	wait_queue_head_t cpu_run;
	struct machine_check_event mce_evt; /* Valid if trap == 0x200 */

	struct kvm_vcpu_arch_shared *shared;
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	bool shared_big_endian;
#endif
	unsigned long magic_page_pa; /* phys addr to map the magic page to */
	unsigned long magic_page_ea; /* effect. addr to map the magic page to */
	bool disable_kernel_nx;

	int irq_type;		/* one of KVM_IRQ_* */
	int irq_cpu_id;
	struct openpic *mpic;	/* KVM_IRQ_MPIC */
#ifdef CONFIG_KVM_XICS
	struct kvmppc_icp *icp; /* XICS presentation controller */
	struct kvmppc_xive_vcpu *xive_vcpu; /* XIVE virtual CPU data */
	__be32 xive_cam_word;    /* Cooked W2 in proper endian with valid bit */
	u8 xive_pushed;		 /* Is the VP pushed on the physical CPU? */
	u8 xive_esc_on;		 /* Is the escalation irq enabled? */
	union xive_tma_w01 xive_saved_state; /* W0..1 of XIVE thread state */
	u64 xive_esc_raddr;	 /* Escalation interrupt ESB real addr */
	u64 xive_esc_vaddr;	 /* Escalation interrupt ESB virt addr */
#endif

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	struct kvm_vcpu_arch_shared shregs;

	struct mmio_hpte_cache mmio_cache;
	unsigned long pgfault_addr;
	long pgfault_index;
	unsigned long pgfault_hpte[2];
	struct mmio_hpte_cache_entry *pgfault_cache;

	struct task_struct *run_task;

	spinlock_t vpa_update_lock;
	struct kvmppc_vpa vpa;
	struct kvmppc_vpa dtl;
	struct dtl_entry *dtl_ptr;
	unsigned long dtl_index;
	u64 stolen_logged;
	struct kvmppc_vpa slb_shadow;

	spinlock_t tbacct_lock;
	u64 busy_stolen;
	u64 busy_preempt;

	u32 emul_inst;

	u32 online;

	/* For support of nested guests */
	struct kvm_nested_guest *nested;
	u32 nested_vcpu_id;
	gpa_t nested_io_gpr;
#endif

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	struct kvmhv_tb_accumulator *cur_activity;	/* What we're timing */
	u64	cur_tb_start;			/* when it started */
	struct kvmhv_tb_accumulator rm_entry;	/* real-mode entry code */
	struct kvmhv_tb_accumulator rm_intr;	/* real-mode intr handling */
	struct kvmhv_tb_accumulator rm_exit;	/* real-mode exit code */
	struct kvmhv_tb_accumulator guest_time;	/* guest execution */
	struct kvmhv_tb_accumulator cede_time;	/* time napping inside guest */

	struct dentry *debugfs_dir;
#endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
};

#define VCPU_FPR(vcpu, i)	(vcpu)->arch.fp.fpr[i][TS_FPROFFSET]
#define VCPU_VSX_FPR(vcpu, i, j)	((vcpu)->arch.fp.fpr[i][j])
#define VCPU_VSX_VR(vcpu, i)		((vcpu)->arch.vr.vr[i])

/* Values for vcpu->arch.state */
#define KVMPPC_VCPU_NOTREADY		0
#define KVMPPC_VCPU_RUNNABLE		1
#define KVMPPC_VCPU_BUSY_IN_HOST	2

/* Values for vcpu->arch.io_gpr */
#define KVM_MMIO_REG_MASK	0x003f
#define KVM_MMIO_REG_EXT_MASK	0xffc0
#define KVM_MMIO_REG_GPR	0x0000
#define KVM_MMIO_REG_FPR	0x0040
#define KVM_MMIO_REG_QPR	0x0080
#define KVM_MMIO_REG_FQPR	0x00c0
#define KVM_MMIO_REG_VSX	0x0100
#define KVM_MMIO_REG_VMX	0x0180
#define KVM_MMIO_REG_NESTED_GPR	0xffc0


#define __KVM_HAVE_ARCH_WQP
#define __KVM_HAVE_CREATE_DEVICE

static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_exit(void) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

#endif /* __POWERPC_KVM_HOST_H__ */