/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * tools/testing/selftests/kvm/include/kvm_util.h
 *
 * Copyright (C) 2018, Google LLC.
 */
7*4882a593Smuzhiyun #ifndef SELFTEST_KVM_UTIL_H
8*4882a593Smuzhiyun #define SELFTEST_KVM_UTIL_H
9*4882a593Smuzhiyun
10*4882a593Smuzhiyun #include "test_util.h"
11*4882a593Smuzhiyun
12*4882a593Smuzhiyun #include "asm/kvm.h"
13*4882a593Smuzhiyun #include "linux/list.h"
14*4882a593Smuzhiyun #include "linux/kvm.h"
15*4882a593Smuzhiyun #include <sys/ioctl.h>
16*4882a593Smuzhiyun
17*4882a593Smuzhiyun #include "sparsebit.h"
18*4882a593Smuzhiyun
19*4882a593Smuzhiyun
20*4882a593Smuzhiyun /*
21*4882a593Smuzhiyun * Callers of kvm_util only have an incomplete/opaque description of the
22*4882a593Smuzhiyun * structure kvm_util is using to maintain the state of a VM.
23*4882a593Smuzhiyun */
24*4882a593Smuzhiyun struct kvm_vm;
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
27*4882a593Smuzhiyun typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
28*4882a593Smuzhiyun
29*4882a593Smuzhiyun /* Minimum allocated guest virtual and physical addresses */
30*4882a593Smuzhiyun #define KVM_UTIL_MIN_VADDR 0x2000
31*4882a593Smuzhiyun
32*4882a593Smuzhiyun #define DEFAULT_GUEST_PHY_PAGES 512
33*4882a593Smuzhiyun #define DEFAULT_GUEST_STACK_VADDR_MIN 0xab6000
34*4882a593Smuzhiyun #define DEFAULT_STACK_PGS 5
35*4882a593Smuzhiyun
/*
 * Guest translation regimes, encoded as
 * physical-address width / virtual-address width / page size.
 */
enum vm_guest_mode {
	VM_MODE_P52V48_4K,
	VM_MODE_P52V48_64K,
	VM_MODE_P48V48_4K,
	VM_MODE_P48V48_64K,
	VM_MODE_P40V48_4K,
	VM_MODE_P40V48_64K,
	VM_MODE_PXXV48_4K,	/* For 48bits VA but ANY bits PA */
	NUM_VM_MODES,
};

/* Per-architecture default guest mode. */
#if defined(__aarch64__)
#define VM_MODE_DEFAULT VM_MODE_P40V48_4K
#elif defined(__x86_64__)
#define VM_MODE_DEFAULT VM_MODE_PXXV48_4K
#else
#define VM_MODE_DEFAULT VM_MODE_P52V48_4K
#endif

/* Human-readable name of a guest mode, indexed by the enum value. */
#define vm_guest_mode_string(m) vm_guest_mode_string[m]
extern const char * const vm_guest_mode_string[];

58*4882a593Smuzhiyun enum vm_mem_backing_src_type {
59*4882a593Smuzhiyun VM_MEM_SRC_ANONYMOUS,
60*4882a593Smuzhiyun VM_MEM_SRC_ANONYMOUS_THP,
61*4882a593Smuzhiyun VM_MEM_SRC_ANONYMOUS_HUGETLB,
62*4882a593Smuzhiyun };
63*4882a593Smuzhiyun
64*4882a593Smuzhiyun int kvm_check_cap(long cap);
65*4882a593Smuzhiyun int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);
66*4882a593Smuzhiyun int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
67*4882a593Smuzhiyun struct kvm_enable_cap *cap);
68*4882a593Smuzhiyun void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
69*4882a593Smuzhiyun
70*4882a593Smuzhiyun struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm);
71*4882a593Smuzhiyun void kvm_vm_free(struct kvm_vm *vmp);
72*4882a593Smuzhiyun void kvm_vm_restart(struct kvm_vm *vmp, int perm);
73*4882a593Smuzhiyun void kvm_vm_release(struct kvm_vm *vmp);
74*4882a593Smuzhiyun void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log);
75*4882a593Smuzhiyun void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
76*4882a593Smuzhiyun uint64_t first_page, uint32_t num_pages);
77*4882a593Smuzhiyun
78*4882a593Smuzhiyun int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
79*4882a593Smuzhiyun size_t len);
80*4882a593Smuzhiyun
81*4882a593Smuzhiyun void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename,
82*4882a593Smuzhiyun uint32_t data_memslot, uint32_t pgd_memslot);
83*4882a593Smuzhiyun
84*4882a593Smuzhiyun void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
85*4882a593Smuzhiyun
86*4882a593Smuzhiyun /*
87*4882a593Smuzhiyun * VM VCPU Dump
88*4882a593Smuzhiyun *
89*4882a593Smuzhiyun * Input Args:
90*4882a593Smuzhiyun * stream - Output FILE stream
91*4882a593Smuzhiyun * vm - Virtual Machine
92*4882a593Smuzhiyun * vcpuid - VCPU ID
93*4882a593Smuzhiyun * indent - Left margin indent amount
94*4882a593Smuzhiyun *
95*4882a593Smuzhiyun * Output Args: None
96*4882a593Smuzhiyun *
97*4882a593Smuzhiyun * Return: None
98*4882a593Smuzhiyun *
99*4882a593Smuzhiyun * Dumps the current state of the VCPU specified by @vcpuid, within the VM
100*4882a593Smuzhiyun * given by @vm, to the FILE stream given by @stream.
101*4882a593Smuzhiyun */
102*4882a593Smuzhiyun void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid,
103*4882a593Smuzhiyun uint8_t indent);
104*4882a593Smuzhiyun
105*4882a593Smuzhiyun void vm_create_irqchip(struct kvm_vm *vm);
106*4882a593Smuzhiyun
107*4882a593Smuzhiyun void vm_userspace_mem_region_add(struct kvm_vm *vm,
108*4882a593Smuzhiyun enum vm_mem_backing_src_type src_type,
109*4882a593Smuzhiyun uint64_t guest_paddr, uint32_t slot, uint64_t npages,
110*4882a593Smuzhiyun uint32_t flags);
111*4882a593Smuzhiyun
112*4882a593Smuzhiyun void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl,
113*4882a593Smuzhiyun void *arg);
114*4882a593Smuzhiyun int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl,
115*4882a593Smuzhiyun void *arg);
116*4882a593Smuzhiyun void vm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg);
117*4882a593Smuzhiyun void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
118*4882a593Smuzhiyun void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
119*4882a593Smuzhiyun void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
120*4882a593Smuzhiyun void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid);
121*4882a593Smuzhiyun vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
122*4882a593Smuzhiyun uint32_t data_memslot, uint32_t pgd_memslot);
123*4882a593Smuzhiyun void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
124*4882a593Smuzhiyun unsigned int npages, uint32_t pgd_memslot);
125*4882a593Smuzhiyun void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
126*4882a593Smuzhiyun void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
127*4882a593Smuzhiyun vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
128*4882a593Smuzhiyun
129*4882a593Smuzhiyun /*
130*4882a593Smuzhiyun * Address Guest Virtual to Guest Physical
131*4882a593Smuzhiyun *
132*4882a593Smuzhiyun * Input Args:
133*4882a593Smuzhiyun * vm - Virtual Machine
134*4882a593Smuzhiyun * gva - VM virtual address
135*4882a593Smuzhiyun *
136*4882a593Smuzhiyun * Output Args: None
137*4882a593Smuzhiyun *
138*4882a593Smuzhiyun * Return:
139*4882a593Smuzhiyun * Equivalent VM physical address
140*4882a593Smuzhiyun *
141*4882a593Smuzhiyun * Returns the VM physical address of the translated VM virtual
142*4882a593Smuzhiyun * address given by @gva.
143*4882a593Smuzhiyun */
144*4882a593Smuzhiyun vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
145*4882a593Smuzhiyun
146*4882a593Smuzhiyun struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid);
147*4882a593Smuzhiyun void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
148*4882a593Smuzhiyun int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
149*4882a593Smuzhiyun void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid);
150*4882a593Smuzhiyun void vcpu_set_guest_debug(struct kvm_vm *vm, uint32_t vcpuid,
151*4882a593Smuzhiyun struct kvm_guest_debug *debug);
152*4882a593Smuzhiyun void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
153*4882a593Smuzhiyun struct kvm_mp_state *mp_state);
154*4882a593Smuzhiyun struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid);
155*4882a593Smuzhiyun void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
156*4882a593Smuzhiyun void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
157*4882a593Smuzhiyun
158*4882a593Smuzhiyun /*
159*4882a593Smuzhiyun * VM VCPU Args Set
160*4882a593Smuzhiyun *
161*4882a593Smuzhiyun * Input Args:
162*4882a593Smuzhiyun * vm - Virtual Machine
163*4882a593Smuzhiyun * vcpuid - VCPU ID
164*4882a593Smuzhiyun * num - number of arguments
165*4882a593Smuzhiyun * ... - arguments, each of type uint64_t
166*4882a593Smuzhiyun *
167*4882a593Smuzhiyun * Output Args: None
168*4882a593Smuzhiyun *
169*4882a593Smuzhiyun * Return: None
170*4882a593Smuzhiyun *
171*4882a593Smuzhiyun * Sets the first @num function input registers of the VCPU with @vcpuid,
172*4882a593Smuzhiyun * per the C calling convention of the architecture, to the values given
173*4882a593Smuzhiyun * as variable args. Each of the variable args is expected to be of type
174*4882a593Smuzhiyun * uint64_t. The maximum @num can be is specific to the architecture.
175*4882a593Smuzhiyun */
176*4882a593Smuzhiyun void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...);
177*4882a593Smuzhiyun
178*4882a593Smuzhiyun void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid,
179*4882a593Smuzhiyun struct kvm_sregs *sregs);
180*4882a593Smuzhiyun void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid,
181*4882a593Smuzhiyun struct kvm_sregs *sregs);
182*4882a593Smuzhiyun int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid,
183*4882a593Smuzhiyun struct kvm_sregs *sregs);
184*4882a593Smuzhiyun void vcpu_fpu_get(struct kvm_vm *vm, uint32_t vcpuid,
185*4882a593Smuzhiyun struct kvm_fpu *fpu);
186*4882a593Smuzhiyun void vcpu_fpu_set(struct kvm_vm *vm, uint32_t vcpuid,
187*4882a593Smuzhiyun struct kvm_fpu *fpu);
188*4882a593Smuzhiyun void vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg);
189*4882a593Smuzhiyun void vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg);
190*4882a593Smuzhiyun #ifdef __KVM_HAVE_VCPU_EVENTS
191*4882a593Smuzhiyun void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
192*4882a593Smuzhiyun struct kvm_vcpu_events *events);
193*4882a593Smuzhiyun void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
194*4882a593Smuzhiyun struct kvm_vcpu_events *events);
195*4882a593Smuzhiyun #endif
196*4882a593Smuzhiyun #ifdef __x86_64__
197*4882a593Smuzhiyun void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid,
198*4882a593Smuzhiyun struct kvm_nested_state *state);
199*4882a593Smuzhiyun int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
200*4882a593Smuzhiyun struct kvm_nested_state *state, bool ignore_error);
201*4882a593Smuzhiyun #endif
202*4882a593Smuzhiyun
203*4882a593Smuzhiyun const char *exit_reason_str(unsigned int exit_reason);
204*4882a593Smuzhiyun
205*4882a593Smuzhiyun void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot);
206*4882a593Smuzhiyun
207*4882a593Smuzhiyun /*
208*4882a593Smuzhiyun * VM Virtual Page Map
209*4882a593Smuzhiyun *
210*4882a593Smuzhiyun * Input Args:
211*4882a593Smuzhiyun * vm - Virtual Machine
212*4882a593Smuzhiyun * vaddr - VM Virtual Address
213*4882a593Smuzhiyun * paddr - VM Physical Address
214*4882a593Smuzhiyun * memslot - Memory region slot for new virtual translation tables
215*4882a593Smuzhiyun *
216*4882a593Smuzhiyun * Output Args: None
217*4882a593Smuzhiyun *
218*4882a593Smuzhiyun * Return: None
219*4882a593Smuzhiyun *
220*4882a593Smuzhiyun * Within @vm, creates a virtual translation for the page starting
221*4882a593Smuzhiyun * at @vaddr to the page starting at @paddr.
222*4882a593Smuzhiyun */
223*4882a593Smuzhiyun void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
224*4882a593Smuzhiyun uint32_t memslot);
225*4882a593Smuzhiyun
226*4882a593Smuzhiyun vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
227*4882a593Smuzhiyun uint32_t memslot);
228*4882a593Smuzhiyun vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
229*4882a593Smuzhiyun vm_paddr_t paddr_min, uint32_t memslot);
230*4882a593Smuzhiyun
231*4882a593Smuzhiyun /*
232*4882a593Smuzhiyun * Create a VM with reasonable defaults
233*4882a593Smuzhiyun *
234*4882a593Smuzhiyun * Input Args:
235*4882a593Smuzhiyun * vcpuid - The id of the single VCPU to add to the VM.
236*4882a593Smuzhiyun * extra_mem_pages - The number of extra pages to add (this will
237*4882a593Smuzhiyun * decide how much extra space we will need to
238*4882a593Smuzhiyun * setup the page tables using memslot 0)
239*4882a593Smuzhiyun * guest_code - The vCPU's entry point
240*4882a593Smuzhiyun *
241*4882a593Smuzhiyun * Output Args: None
242*4882a593Smuzhiyun *
243*4882a593Smuzhiyun * Return:
244*4882a593Smuzhiyun * Pointer to opaque structure that describes the created VM.
245*4882a593Smuzhiyun */
246*4882a593Smuzhiyun struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
247*4882a593Smuzhiyun void *guest_code);
248*4882a593Smuzhiyun
249*4882a593Smuzhiyun /*
250*4882a593Smuzhiyun * Adds a vCPU with reasonable defaults (e.g. a stack)
251*4882a593Smuzhiyun *
252*4882a593Smuzhiyun * Input Args:
253*4882a593Smuzhiyun * vm - Virtual Machine
254*4882a593Smuzhiyun * vcpuid - The id of the VCPU to add to the VM.
255*4882a593Smuzhiyun * guest_code - The vCPU's entry point
256*4882a593Smuzhiyun */
257*4882a593Smuzhiyun void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code);
258*4882a593Smuzhiyun
259*4882a593Smuzhiyun bool vm_is_unrestricted_guest(struct kvm_vm *vm);
260*4882a593Smuzhiyun
261*4882a593Smuzhiyun unsigned int vm_get_page_size(struct kvm_vm *vm);
262*4882a593Smuzhiyun unsigned int vm_get_page_shift(struct kvm_vm *vm);
263*4882a593Smuzhiyun unsigned int vm_get_max_gfn(struct kvm_vm *vm);
264*4882a593Smuzhiyun int vm_get_fd(struct kvm_vm *vm);
265*4882a593Smuzhiyun
266*4882a593Smuzhiyun unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
267*4882a593Smuzhiyun unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
268*4882a593Smuzhiyun unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);
269*4882a593Smuzhiyun static inline unsigned int
vm_adjust_num_guest_pages(enum vm_guest_mode mode,unsigned int num_guest_pages)270*4882a593Smuzhiyun vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
271*4882a593Smuzhiyun {
272*4882a593Smuzhiyun unsigned int n;
273*4882a593Smuzhiyun n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
274*4882a593Smuzhiyun #ifdef __s390x__
275*4882a593Smuzhiyun /* s390 requires 1M aligned guest sizes */
276*4882a593Smuzhiyun n = (n + 255) & ~255;
277*4882a593Smuzhiyun #endif
278*4882a593Smuzhiyun return n;
279*4882a593Smuzhiyun }
280*4882a593Smuzhiyun
281*4882a593Smuzhiyun struct kvm_userspace_memory_region *
282*4882a593Smuzhiyun kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
283*4882a593Smuzhiyun uint64_t end);
284*4882a593Smuzhiyun
285*4882a593Smuzhiyun struct kvm_dirty_log *
286*4882a593Smuzhiyun allocate_kvm_dirty_log(struct kvm_userspace_memory_region *region);
287*4882a593Smuzhiyun
288*4882a593Smuzhiyun int vm_create_device(struct kvm_vm *vm, struct kvm_create_device *cd);
289*4882a593Smuzhiyun
290*4882a593Smuzhiyun #define sync_global_to_guest(vm, g) ({ \
291*4882a593Smuzhiyun typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
292*4882a593Smuzhiyun memcpy(_p, &(g), sizeof(g)); \
293*4882a593Smuzhiyun })
294*4882a593Smuzhiyun
295*4882a593Smuzhiyun #define sync_global_from_guest(vm, g) ({ \
296*4882a593Smuzhiyun typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
297*4882a593Smuzhiyun memcpy(&(g), _p, sizeof(g)); \
298*4882a593Smuzhiyun })
299*4882a593Smuzhiyun
300*4882a593Smuzhiyun void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid);
301*4882a593Smuzhiyun
302*4882a593Smuzhiyun /* Common ucalls */
303*4882a593Smuzhiyun enum {
304*4882a593Smuzhiyun UCALL_NONE,
305*4882a593Smuzhiyun UCALL_SYNC,
306*4882a593Smuzhiyun UCALL_ABORT,
307*4882a593Smuzhiyun UCALL_DONE,
308*4882a593Smuzhiyun };
309*4882a593Smuzhiyun
310*4882a593Smuzhiyun #define UCALL_MAX_ARGS 6
311*4882a593Smuzhiyun
312*4882a593Smuzhiyun struct ucall {
313*4882a593Smuzhiyun uint64_t cmd;
314*4882a593Smuzhiyun uint64_t args[UCALL_MAX_ARGS];
315*4882a593Smuzhiyun };
316*4882a593Smuzhiyun
317*4882a593Smuzhiyun void ucall_init(struct kvm_vm *vm, void *arg);
318*4882a593Smuzhiyun void ucall_uninit(struct kvm_vm *vm);
319*4882a593Smuzhiyun void ucall(uint64_t cmd, int nargs, ...);
320*4882a593Smuzhiyun uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc);
321*4882a593Smuzhiyun
322*4882a593Smuzhiyun #define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4) \
323*4882a593Smuzhiyun ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4)
324*4882a593Smuzhiyun #define GUEST_SYNC(stage) ucall(UCALL_SYNC, 2, "hello", stage)
325*4882a593Smuzhiyun #define GUEST_DONE() ucall(UCALL_DONE, 0)
326*4882a593Smuzhiyun #define __GUEST_ASSERT(_condition, _nargs, _args...) do { \
327*4882a593Smuzhiyun if (!(_condition)) \
328*4882a593Smuzhiyun ucall(UCALL_ABORT, 2 + _nargs, \
329*4882a593Smuzhiyun "Failed guest assert: " \
330*4882a593Smuzhiyun #_condition, __LINE__, _args); \
331*4882a593Smuzhiyun } while (0)
332*4882a593Smuzhiyun
333*4882a593Smuzhiyun #define GUEST_ASSERT(_condition) \
334*4882a593Smuzhiyun __GUEST_ASSERT((_condition), 0, 0)
335*4882a593Smuzhiyun
336*4882a593Smuzhiyun #define GUEST_ASSERT_1(_condition, arg1) \
337*4882a593Smuzhiyun __GUEST_ASSERT((_condition), 1, (arg1))
338*4882a593Smuzhiyun
339*4882a593Smuzhiyun #define GUEST_ASSERT_2(_condition, arg1, arg2) \
340*4882a593Smuzhiyun __GUEST_ASSERT((_condition), 2, (arg1), (arg2))
341*4882a593Smuzhiyun
342*4882a593Smuzhiyun #define GUEST_ASSERT_3(_condition, arg1, arg2, arg3) \
343*4882a593Smuzhiyun __GUEST_ASSERT((_condition), 3, (arg1), (arg2), (arg3))
344*4882a593Smuzhiyun
345*4882a593Smuzhiyun #define GUEST_ASSERT_4(_condition, arg1, arg2, arg3, arg4) \
346*4882a593Smuzhiyun __GUEST_ASSERT((_condition), 4, (arg1), (arg2), (arg3), (arg4))
347*4882a593Smuzhiyun
348*4882a593Smuzhiyun #endif /* SELFTEST_KVM_UTIL_H */
349