/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_X86_KVM_H
#define _ASM_X86_KVM_H

/*
 * KVM x86 specific structures and definitions
 *
 * NOTE: this is a userspace ABI (UAPI) header.  Struct layouts, macro
 * values and field names below are part of the kernel/userspace ioctl
 * contract and must not be changed incompatibly.
 */

#include <linux/types.h>
#include <linux/ioctl.h>

#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2

/* x86 hardware exception vector numbers. */
#define DE_VECTOR 0
#define DB_VECTOR 1
#define BP_VECTOR 3
#define OF_VECTOR 4
#define BR_VECTOR 5
#define UD_VECTOR 6
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
#define MF_VECTOR 16
#define AC_VECTOR 17
#define MC_VECTOR 18
#define XM_VECTOR 19
#define VE_VECTOR 20

/* Select x86 specific features in <linux/kvm.h> */
#define __KVM_HAVE_PIT
#define __KVM_HAVE_IOAPIC
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_MSI
#define __KVM_HAVE_USER_NMI
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_MSIX
#define __KVM_HAVE_MCE
#define __KVM_HAVE_PIT_STATE2
#define __KVM_HAVE_XEN_HVM
#define __KVM_HAVE_VCPU_EVENTS
#define __KVM_HAVE_DEBUGREGS
#define __KVM_HAVE_XSAVE
#define __KVM_HAVE_XCRS
#define __KVM_HAVE_READONLY_MEM

/* Architectural interrupt line count. */
#define KVM_NR_INTERRUPTS 256

struct kvm_memory_alias {
	__u32 slot;  /* this has a different namespace than memory slots */
	__u32 flags;
	__u64 guest_phys_addr;
	__u64 memory_size;
	__u64 target_phys_addr;
};

/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
struct kvm_pic_state {
	__u8 last_irr;	/* edge detection */
	__u8 irr;		/* interrupt request register */
	__u8 imr;		/* interrupt mask register */
	__u8 isr;		/* interrupt service register */
	__u8 priority_add;	/* highest irq priority */
	__u8 irq_base;
	__u8 read_reg_select;
	__u8 poll;
	__u8 special_mask;
	__u8 init_state;
	__u8 auto_eoi;
	__u8 rotate_on_auto_eoi;
	__u8 special_fully_nested_mode;
	__u8 init4;		/* true if 4 byte init */
	__u8 elcr;		/* PIIX edge/trigger selection */
	__u8 elcr_mask;
};

#define KVM_IOAPIC_NUM_PINS  24
struct kvm_ioapic_state {
	__u64 base_address;
	__u32 ioregsel;
	__u32 id;
	__u32 irr;
	__u32 pad;
	/* One 64-bit redirection-table entry per pin, also addressable
	 * as the raw register value via 'bits'. */
	union {
		__u64 bits;
		struct {
			__u8 vector;
			__u8 delivery_mode:3;
			__u8 dest_mode:1;
			__u8 delivery_status:1;
			__u8 polarity:1;
			__u8 remote_irr:1;
			__u8 trig_mode:1;
			__u8 mask:1;
			__u8 reserve:7;
			__u8 reserved[4];
			__u8 dest_id;
		} fields;
	} redirtbl[KVM_IOAPIC_NUM_PINS];
};

#define KVM_IRQCHIP_PIC_MASTER   0
#define KVM_IRQCHIP_PIC_SLAVE    1
#define KVM_IRQCHIP_IOAPIC       2
#define KVM_NR_IRQCHIPS          3

#define KVM_RUN_X86_SMM		 (1 << 0)

/* for KVM_GET_REGS and KVM_SET_REGS */
struct kvm_regs {
	/* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
	__u64 rax, rbx, rcx, rdx;
	__u64 rsi, rdi, rsp, rbp;
	__u64 r8,  r9,  r10, r11;
	__u64 r12, r13, r14, r15;
	__u64 rip, rflags;
};

/* for KVM_GET_LAPIC and KVM_SET_LAPIC */
#define KVM_APIC_REG_SIZE 0x400
struct kvm_lapic_state {
	char regs[KVM_APIC_REG_SIZE];
};

struct kvm_segment {
	__u64 base;
	__u32 limit;
	__u16 selector;
	__u8  type;
	__u8  present, dpl, db, s, l, g, avl;
	__u8  unusable;
	__u8  padding;
};

struct kvm_dtable {
	__u64 base;
	__u16 limit;
	__u16 padding[3];
};


/* for KVM_GET_SREGS and KVM_SET_SREGS */
struct kvm_sregs {
	/* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */
	struct kvm_segment cs, ds, es, fs, gs, ss;
	struct kvm_segment tr, ldt;
	struct kvm_dtable gdt, idt;
	__u64 cr0, cr2, cr3, cr4, cr8;
	__u64 efer;
	__u64 apic_base;
	__u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
};

/* for KVM_GET_FPU and KVM_SET_FPU */
struct kvm_fpu {
	__u8  fpr[8][16];
	__u16 fcw;
	__u16 fsw;
	__u8  ftwx;  /* in fxsave format */
	__u8  pad1;
	__u16 last_opcode;
	__u64 last_ip;
	__u64 last_dp;
	__u8  xmm[16][16];
	__u32 mxcsr;
	__u32 pad2;
};

struct kvm_msr_entry {
	__u32 index;
	__u32 reserved;
	__u64 data;
};

/* for KVM_GET_MSRS and KVM_SET_MSRS */
struct kvm_msrs {
	__u32 nmsrs; /* number of msrs in entries */
	__u32 pad;

	struct kvm_msr_entry entries[0];
};

/* for KVM_GET_MSR_INDEX_LIST */
struct kvm_msr_list {
	__u32 nmsrs; /* number of msrs in entries */
	__u32 indices[0];
};

/* Maximum size of any access bitmap in bytes */
#define KVM_MSR_FILTER_MAX_BITMAP_SIZE 0x600

/* for KVM_X86_SET_MSR_FILTER */
struct kvm_msr_filter_range {
#define KVM_MSR_FILTER_READ  (1 << 0)
#define KVM_MSR_FILTER_WRITE (1 << 1)
	__u32 flags;
	__u32 nmsrs; /* number of msrs in bitmap */
	__u32 base;  /* MSR index the bitmap starts at */
	__u8 *bitmap; /* a 1 bit allows the operations in flags, 0 denies */
};

#define KVM_MSR_FILTER_MAX_RANGES 16
struct kvm_msr_filter {
#define KVM_MSR_FILTER_DEFAULT_ALLOW (0 << 0)
#define KVM_MSR_FILTER_DEFAULT_DENY  (1 << 0)
	__u32 flags;
	struct kvm_msr_filter_range ranges[KVM_MSR_FILTER_MAX_RANGES];
};

struct kvm_cpuid_entry {
	__u32 function;
	__u32 eax;
	__u32 ebx;
	__u32 ecx;
	__u32 edx;
	__u32 padding;
};

/* for KVM_SET_CPUID */
struct kvm_cpuid {
	__u32 nent;
	__u32 padding;
	struct kvm_cpuid_entry entries[0];
};

struct kvm_cpuid_entry2 {
	__u32 function;
	__u32 index;
	__u32 flags;
	__u32 eax;
	__u32 ebx;
	__u32 ecx;
	__u32 edx;
	__u32 padding[3];
};

#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX		(1 << 0)
#define KVM_CPUID_FLAG_STATEFUL_FUNC		(1 << 1)
#define KVM_CPUID_FLAG_STATE_READ_NEXT		(1 << 2)

/* for KVM_SET_CPUID2 */
struct kvm_cpuid2 {
	__u32 nent;
	__u32 padding;
	struct kvm_cpuid_entry2 entries[0];
};

/* for KVM_GET_PIT and KVM_SET_PIT */
struct kvm_pit_channel_state {
	__u32 count; /* can be 65536 */
	__u16 latched_count;
	__u8 count_latched;
	__u8 status_latched;
	__u8 status;
	__u8 read_state;
	__u8 write_state;
	__u8 write_latch;
	__u8 rw_mode;
	__u8 mode;
	__u8 bcd;
	__u8 gate;
	__s64 count_load_time;
};

struct kvm_debug_exit_arch {
	__u32 exception;
	__u32 pad;
	__u64 pc;
	__u64 dr6;
	__u64 dr7;
};

#define KVM_GUESTDBG_USE_SW_BP		0x00010000
#define KVM_GUESTDBG_USE_HW_BP		0x00020000
#define KVM_GUESTDBG_INJECT_DB		0x00040000
#define KVM_GUESTDBG_INJECT_BP		0x00080000

/* for KVM_SET_GUEST_DEBUG */
struct kvm_guest_debug_arch {
	__u64 debugreg[8];
};

struct kvm_pit_state {
	struct kvm_pit_channel_state channels[3];
};

#define KVM_PIT_FLAGS_HPET_LEGACY  0x00000001

struct kvm_pit_state2 {
	struct kvm_pit_channel_state channels[3];
	__u32 flags;
	__u32 reserved[9];
};

struct kvm_reinject_control {
	__u8 pit_reinject;
	__u8 reserved[31];
};

/* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
#define KVM_VCPUEVENT_VALID_NMI_PENDING	0x00000001
#define KVM_VCPUEVENT_VALID_SIPI_VECTOR	0x00000002
#define KVM_VCPUEVENT_VALID_SHADOW	0x00000004
#define KVM_VCPUEVENT_VALID_SMM		0x00000008
#define KVM_VCPUEVENT_VALID_PAYLOAD	0x00000010

/* Interrupt shadow states */
#define KVM_X86_SHADOW_INT_MOV_SS	0x01
#define KVM_X86_SHADOW_INT_STI		0x02

/* for KVM_GET/SET_VCPU_EVENTS */
struct kvm_vcpu_events {
	struct {
		__u8 injected;
		__u8 nr;
		__u8 has_error_code;
		__u8 pending;
		__u32 error_code;
	} exception;
	struct {
		__u8 injected;
		__u8 nr;
		__u8 soft;
		__u8 shadow;
	} interrupt;
	struct {
		__u8 injected;
		__u8 pending;
		__u8 masked;
		__u8 pad;
	} nmi;
	__u32 sipi_vector;
	__u32 flags;
	struct {
		__u8 smm;
		__u8 pending;
		__u8 smm_inside_nmi;
		__u8 latched_init;
	} smi;
	__u8 reserved[27];
	__u8 exception_has_payload;
	__u64 exception_payload;
};

/* for KVM_GET/SET_DEBUGREGS */
struct kvm_debugregs {
	__u64 db[4];
	__u64 dr6;
	__u64 dr7;
	__u64 flags;
	__u64 reserved[9];
};

/* for KVM_CAP_XSAVE */
struct kvm_xsave {
	/* 4096 bytes of raw XSAVE area data. */
	__u32 region[1024];
};

#define KVM_MAX_XCRS	16

struct kvm_xcr {
	__u32 xcr;
	__u32 reserved;
	__u64 value;
};

struct kvm_xcrs {
	__u32 nr_xcrs;
	__u32 flags;
	struct kvm_xcr xcrs[KVM_MAX_XCRS];
	__u64 padding[16];
};

#define KVM_SYNC_X86_REGS      (1UL << 0)
#define KVM_SYNC_X86_SREGS     (1UL << 1)
#define KVM_SYNC_X86_EVENTS    (1UL << 2)

#define KVM_SYNC_X86_VALID_FIELDS \
	(KVM_SYNC_X86_REGS| \
	 KVM_SYNC_X86_SREGS| \
	 KVM_SYNC_X86_EVENTS)

/* kvm_sync_regs struct included by kvm_run struct */
struct kvm_sync_regs {
	/* Members of this structure are potentially malicious.
	 * Care must be taken by code reading, esp. interpreting,
	 * data fields from them inside KVM to prevent TOCTOU and
	 * double-fetch types of vulnerabilities.
	 */
	struct kvm_regs regs;
	struct kvm_sregs sregs;
	struct kvm_vcpu_events events;
};

#define KVM_X86_QUIRK_LINT0_REENABLED	   (1 << 0)
#define KVM_X86_QUIRK_CD_NW_CLEARED	   (1 << 1)
#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE	   (1 << 2)
#define KVM_X86_QUIRK_OUT_7E_INC_RIP	   (1 << 3)
#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4)

#define KVM_STATE_NESTED_FORMAT_VMX	0
#define KVM_STATE_NESTED_FORMAT_SVM	1

#define KVM_STATE_NESTED_GUEST_MODE	0x00000001
#define KVM_STATE_NESTED_RUN_PENDING	0x00000002
#define KVM_STATE_NESTED_EVMCS		0x00000004
#define KVM_STATE_NESTED_MTF_PENDING	0x00000008
#define KVM_STATE_NESTED_GIF_SET	0x00000100

#define KVM_STATE_NESTED_SMM_GUEST_MODE	0x00000001
#define KVM_STATE_NESTED_SMM_VMXON	0x00000002

#define KVM_STATE_NESTED_VMX_VMCS_SIZE	0x1000

#define KVM_STATE_NESTED_SVM_VMCB_SIZE	0x1000

#define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE	0x00000001

struct kvm_vmx_nested_state_data {
	__u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
	__u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
};

struct kvm_vmx_nested_state_hdr {
	__u64 vmxon_pa;
	__u64 vmcs12_pa;

	struct {
		__u16 flags;
	} smm;

	__u32 flags;
	__u64 preemption_timer_deadline;
};

struct kvm_svm_nested_state_data {
	/* Save area only used if KVM_STATE_NESTED_RUN_PENDING.  */
	__u8 vmcb12[KVM_STATE_NESTED_SVM_VMCB_SIZE];
};

struct kvm_svm_nested_state_hdr {
	__u64 vmcb_pa;
};

/* for KVM_CAP_NESTED_STATE */
struct kvm_nested_state {
	__u16 flags;
	__u16 format;
	__u32 size;

	union {
		struct kvm_vmx_nested_state_hdr vmx;
		struct kvm_svm_nested_state_hdr svm;

		/* Pad the header to 128 bytes.  */
		__u8 pad[120];
	} hdr;

	/*
	 * Define data region as 0 bytes to preserve backwards-compatability
	 * to old definition of kvm_nested_state in order to avoid changing
	 * KVM_{GET,PUT}_NESTED_STATE ioctl values.
	 */
	union {
		struct kvm_vmx_nested_state_data vmx[0];
		struct kvm_svm_nested_state_data svm[0];
	} data;
};

/* for KVM_CAP_PMU_EVENT_FILTER */
struct kvm_pmu_event_filter {
	__u32 action;
	__u32 nevents;
	__u32 fixed_counter_bitmap;
	__u32 flags;
	__u32 pad[4];
	__u64 events[0];
};

#define KVM_PMU_EVENT_ALLOW 0
#define KVM_PMU_EVENT_DENY 1

#endif /* _ASM_X86_KVM_H */