1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * tools/testing/selftests/kvm/lib/kvm_util.c
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (C) 2018, Google LLC.
6*4882a593Smuzhiyun */
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #include "test_util.h"
9*4882a593Smuzhiyun #include "kvm_util.h"
10*4882a593Smuzhiyun #include "kvm_util_internal.h"
11*4882a593Smuzhiyun #include "processor.h"
12*4882a593Smuzhiyun
13*4882a593Smuzhiyun #include <assert.h>
14*4882a593Smuzhiyun #include <sys/mman.h>
15*4882a593Smuzhiyun #include <sys/types.h>
16*4882a593Smuzhiyun #include <sys/stat.h>
17*4882a593Smuzhiyun #include <unistd.h>
18*4882a593Smuzhiyun #include <linux/kernel.h>
19*4882a593Smuzhiyun
20*4882a593Smuzhiyun #define KVM_UTIL_PGS_PER_HUGEPG 512
21*4882a593Smuzhiyun #define KVM_UTIL_MIN_PFN 2
22*4882a593Smuzhiyun
23*4882a593Smuzhiyun /* Aligns x up to the next multiple of size. Size must be a power of 2. */
/* Rounds the pointer x up to the next multiple of size (a power of 2). */
static void *align(void *x, size_t size)
{
	size_t mask;

	TEST_ASSERT(size != 0 && !(size & (size - 1)),
		    "size not a power of 2: %lu", size);

	mask = size - 1;
	return (void *) (((size_t) x + mask) & ~mask);
}
31*4882a593Smuzhiyun
32*4882a593Smuzhiyun /*
33*4882a593Smuzhiyun * Capability
34*4882a593Smuzhiyun *
35*4882a593Smuzhiyun * Input Args:
36*4882a593Smuzhiyun * cap - Capability
37*4882a593Smuzhiyun *
38*4882a593Smuzhiyun * Output Args: None
39*4882a593Smuzhiyun *
40*4882a593Smuzhiyun * Return:
41*4882a593Smuzhiyun * On success, the Value corresponding to the capability (KVM_CAP_*)
42*4882a593Smuzhiyun * specified by the value of cap. On failure a TEST_ASSERT failure
43*4882a593Smuzhiyun * is produced.
44*4882a593Smuzhiyun *
45*4882a593Smuzhiyun * Looks up and returns the value corresponding to the capability
46*4882a593Smuzhiyun * (KVM_CAP_*) given by cap.
47*4882a593Smuzhiyun */
kvm_check_cap(long cap)48*4882a593Smuzhiyun int kvm_check_cap(long cap)
49*4882a593Smuzhiyun {
50*4882a593Smuzhiyun int ret;
51*4882a593Smuzhiyun int kvm_fd;
52*4882a593Smuzhiyun
53*4882a593Smuzhiyun kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
54*4882a593Smuzhiyun if (kvm_fd < 0)
55*4882a593Smuzhiyun exit(KSFT_SKIP);
56*4882a593Smuzhiyun
57*4882a593Smuzhiyun ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
58*4882a593Smuzhiyun TEST_ASSERT(ret >= 0, "KVM_CHECK_EXTENSION IOCTL failed,\n"
59*4882a593Smuzhiyun " rc: %i errno: %i", ret, errno);
60*4882a593Smuzhiyun
61*4882a593Smuzhiyun close(kvm_fd);
62*4882a593Smuzhiyun
63*4882a593Smuzhiyun return ret;
64*4882a593Smuzhiyun }
65*4882a593Smuzhiyun
66*4882a593Smuzhiyun /* VM Enable Capability
67*4882a593Smuzhiyun *
68*4882a593Smuzhiyun * Input Args:
69*4882a593Smuzhiyun * vm - Virtual Machine
70*4882a593Smuzhiyun * cap - Capability
71*4882a593Smuzhiyun *
72*4882a593Smuzhiyun * Output Args: None
73*4882a593Smuzhiyun *
74*4882a593Smuzhiyun * Return: On success, 0. On failure a TEST_ASSERT failure is produced.
75*4882a593Smuzhiyun *
76*4882a593Smuzhiyun * Enables a capability (KVM_CAP_*) on the VM.
77*4882a593Smuzhiyun */
vm_enable_cap(struct kvm_vm * vm,struct kvm_enable_cap * cap)78*4882a593Smuzhiyun int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
79*4882a593Smuzhiyun {
80*4882a593Smuzhiyun int ret;
81*4882a593Smuzhiyun
82*4882a593Smuzhiyun ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap);
83*4882a593Smuzhiyun TEST_ASSERT(ret == 0, "KVM_ENABLE_CAP IOCTL failed,\n"
84*4882a593Smuzhiyun " rc: %i errno: %i", ret, errno);
85*4882a593Smuzhiyun
86*4882a593Smuzhiyun return ret;
87*4882a593Smuzhiyun }
88*4882a593Smuzhiyun
89*4882a593Smuzhiyun /* VCPU Enable Capability
90*4882a593Smuzhiyun *
91*4882a593Smuzhiyun * Input Args:
92*4882a593Smuzhiyun * vm - Virtual Machine
93*4882a593Smuzhiyun * vcpu_id - VCPU
94*4882a593Smuzhiyun * cap - Capability
95*4882a593Smuzhiyun *
96*4882a593Smuzhiyun * Output Args: None
97*4882a593Smuzhiyun *
98*4882a593Smuzhiyun * Return: On success, 0. On failure a TEST_ASSERT failure is produced.
99*4882a593Smuzhiyun *
100*4882a593Smuzhiyun * Enables a capability (KVM_CAP_*) on the VCPU.
101*4882a593Smuzhiyun */
vcpu_enable_cap(struct kvm_vm * vm,uint32_t vcpu_id,struct kvm_enable_cap * cap)102*4882a593Smuzhiyun int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
103*4882a593Smuzhiyun struct kvm_enable_cap *cap)
104*4882a593Smuzhiyun {
105*4882a593Smuzhiyun struct vcpu *vcpu = vcpu_find(vm, vcpu_id);
106*4882a593Smuzhiyun int r;
107*4882a593Smuzhiyun
108*4882a593Smuzhiyun TEST_ASSERT(vcpu, "cannot find vcpu %d", vcpu_id);
109*4882a593Smuzhiyun
110*4882a593Smuzhiyun r = ioctl(vcpu->fd, KVM_ENABLE_CAP, cap);
111*4882a593Smuzhiyun TEST_ASSERT(!r, "KVM_ENABLE_CAP vCPU ioctl failed,\n"
112*4882a593Smuzhiyun " rc: %i, errno: %i", r, errno);
113*4882a593Smuzhiyun
114*4882a593Smuzhiyun return r;
115*4882a593Smuzhiyun }
116*4882a593Smuzhiyun
vm_open(struct kvm_vm * vm,int perm)117*4882a593Smuzhiyun static void vm_open(struct kvm_vm *vm, int perm)
118*4882a593Smuzhiyun {
119*4882a593Smuzhiyun vm->kvm_fd = open(KVM_DEV_PATH, perm);
120*4882a593Smuzhiyun if (vm->kvm_fd < 0)
121*4882a593Smuzhiyun exit(KSFT_SKIP);
122*4882a593Smuzhiyun
123*4882a593Smuzhiyun if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
124*4882a593Smuzhiyun print_skip("immediate_exit not available");
125*4882a593Smuzhiyun exit(KSFT_SKIP);
126*4882a593Smuzhiyun }
127*4882a593Smuzhiyun
128*4882a593Smuzhiyun vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, vm->type);
129*4882a593Smuzhiyun TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, "
130*4882a593Smuzhiyun "rc: %i errno: %i", vm->fd, errno);
131*4882a593Smuzhiyun }
132*4882a593Smuzhiyun
/*
 * Human-readable description of each vm_guest_mode, indexed by mode.
 * Order must match the enum; the static assert below catches a new
 * mode that was added without a matching string.
 */
const char * const vm_guest_mode_string[] = {
	"PA-bits:52, VA-bits:48, 4K pages",
	"PA-bits:52, VA-bits:48, 64K pages",
	"PA-bits:48, VA-bits:48, 4K pages",
	"PA-bits:48, VA-bits:48, 64K pages",
	"PA-bits:40, VA-bits:48, 4K pages",
	"PA-bits:40, VA-bits:48, 64K pages",
	"PA-bits:ANY, VA-bits:48, 4K pages",
};
_Static_assert(sizeof(vm_guest_mode_string)/sizeof(char *) == NUM_VM_MODES,
	       "Missing new mode strings?");
144*4882a593Smuzhiyun
/* Address-width and page-geometry parameters for one guest mode. */
struct vm_guest_mode_params {
	unsigned int pa_bits;	/* physical address width */
	unsigned int va_bits;	/* virtual address width */
	unsigned int page_size;	/* page size in bytes */
	unsigned int page_shift;	/* log2(page_size) */
};

/*
 * Per-mode parameters, indexed by vm_guest_mode. Rows must stay in
 * enum order; the PXXV48 row uses 0 PA/VA bits because the real widths
 * are probed at runtime in vm_create().
 */
static const struct vm_guest_mode_params vm_guest_mode_params[] = {
	{ 52, 48, 0x1000, 12 },
	{ 52, 48, 0x10000, 16 },
	{ 48, 48, 0x1000, 12 },
	{ 48, 48, 0x10000, 16 },
	{ 40, 48, 0x1000, 12 },
	{ 40, 48, 0x10000, 16 },
	{ 0, 0, 0x1000, 12 },
};
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
	       "Missing new mode params?");
163*4882a593Smuzhiyun
164*4882a593Smuzhiyun /*
165*4882a593Smuzhiyun * VM Create
166*4882a593Smuzhiyun *
167*4882a593Smuzhiyun * Input Args:
168*4882a593Smuzhiyun * mode - VM Mode (e.g. VM_MODE_P52V48_4K)
169*4882a593Smuzhiyun * phy_pages - Physical memory pages
170*4882a593Smuzhiyun * perm - permission
171*4882a593Smuzhiyun *
172*4882a593Smuzhiyun * Output Args: None
173*4882a593Smuzhiyun *
174*4882a593Smuzhiyun * Return:
175*4882a593Smuzhiyun * Pointer to opaque structure that describes the created VM.
176*4882a593Smuzhiyun *
177*4882a593Smuzhiyun * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K).
178*4882a593Smuzhiyun * When phy_pages is non-zero, a memory region of phy_pages physical pages
179*4882a593Smuzhiyun * is created and mapped starting at guest physical address 0. The file
180*4882a593Smuzhiyun * descriptor to control the created VM is created with the permissions
181*4882a593Smuzhiyun * given by perm (e.g. O_RDWR).
182*4882a593Smuzhiyun */
struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
{
	struct kvm_vm *vm;

	pr_debug("%s: mode='%s' pages='%ld' perm='%d'\n", __func__,
		 vm_guest_mode_string(mode), phy_pages, perm);

	vm = calloc(1, sizeof(*vm));
	TEST_ASSERT(vm != NULL, "Insufficient Memory");

	INIT_LIST_HEAD(&vm->vcpus);
	INIT_LIST_HEAD(&vm->userspace_mem_regions);

	vm->mode = mode;
	vm->type = 0;

	/* Seed the address widths and page geometry from the mode table. */
	vm->pa_bits = vm_guest_mode_params[mode].pa_bits;
	vm->va_bits = vm_guest_mode_params[mode].va_bits;
	vm->page_size = vm_guest_mode_params[mode].page_size;
	vm->page_shift = vm_guest_mode_params[mode].page_shift;

	/* Setup mode specific traits. */
	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P52V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_P48V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P48V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_P40V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P40V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_PXXV48_4K:
#ifdef __x86_64__
		/* PA/VA widths were 0 in the table; probe the real ones. */
		kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits);
		/*
		 * Ignore KVM support for 5-level paging (vm->va_bits == 57),
		 * it doesn't take effect unless a CR4.LA57 is set, which it
		 * isn't for this VM_MODE.
		 */
		TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57,
			    "Linear address width (%d bits) not supported",
			    vm->va_bits);
		pr_debug("Guest physical address width detected: %d\n",
			 vm->pa_bits);
		vm->pgtable_levels = 4;
		vm->va_bits = 48;
#else
		TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
#endif
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", mode);
	}

#ifdef __aarch64__
	/* Non-default IPA sizes must be requested via the VM type. */
	if (vm->pa_bits != 40)
		vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits);
#endif

	vm_open(vm, perm);

	/*
	 * Limit to VA-bit canonical virtual addresses: mark the low and
	 * high canonical halves of the VA space as valid page numbers.
	 */
	vm->vpages_valid = sparsebit_alloc();
	sparsebit_set_num(vm->vpages_valid,
		0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
	sparsebit_set_num(vm->vpages_valid,
		(~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
		(1ULL << (vm->va_bits - 1)) >> vm->page_shift);

	/* Limit physical addresses to PA-bits. */
	vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;

	/* Allocate and setup memory for guest. */
	vm->vpages_mapped = sparsebit_alloc();
	if (phy_pages != 0)
		vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
					    0, 0, phy_pages, 0);

	return vm;
}
273*4882a593Smuzhiyun
274*4882a593Smuzhiyun /*
275*4882a593Smuzhiyun * VM Restart
276*4882a593Smuzhiyun *
277*4882a593Smuzhiyun * Input Args:
278*4882a593Smuzhiyun * vm - VM that has been released before
279*4882a593Smuzhiyun * perm - permission
280*4882a593Smuzhiyun *
281*4882a593Smuzhiyun * Output Args: None
282*4882a593Smuzhiyun *
283*4882a593Smuzhiyun * Reopens the file descriptors associated to the VM and reinstates the
284*4882a593Smuzhiyun * global state, such as the irqchip and the memory regions that are mapped
285*4882a593Smuzhiyun * into the guest.
286*4882a593Smuzhiyun */
kvm_vm_restart(struct kvm_vm * vmp,int perm)287*4882a593Smuzhiyun void kvm_vm_restart(struct kvm_vm *vmp, int perm)
288*4882a593Smuzhiyun {
289*4882a593Smuzhiyun struct userspace_mem_region *region;
290*4882a593Smuzhiyun
291*4882a593Smuzhiyun vm_open(vmp, perm);
292*4882a593Smuzhiyun if (vmp->has_irqchip)
293*4882a593Smuzhiyun vm_create_irqchip(vmp);
294*4882a593Smuzhiyun
295*4882a593Smuzhiyun list_for_each_entry(region, &vmp->userspace_mem_regions, list) {
296*4882a593Smuzhiyun int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, ®ion->region);
297*4882a593Smuzhiyun TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
298*4882a593Smuzhiyun " rc: %i errno: %i\n"
299*4882a593Smuzhiyun " slot: %u flags: 0x%x\n"
300*4882a593Smuzhiyun " guest_phys_addr: 0x%llx size: 0x%llx",
301*4882a593Smuzhiyun ret, errno, region->region.slot,
302*4882a593Smuzhiyun region->region.flags,
303*4882a593Smuzhiyun region->region.guest_phys_addr,
304*4882a593Smuzhiyun region->region.memory_size);
305*4882a593Smuzhiyun }
306*4882a593Smuzhiyun }
307*4882a593Smuzhiyun
kvm_vm_get_dirty_log(struct kvm_vm * vm,int slot,void * log)308*4882a593Smuzhiyun void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
309*4882a593Smuzhiyun {
310*4882a593Smuzhiyun struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };
311*4882a593Smuzhiyun int ret;
312*4882a593Smuzhiyun
313*4882a593Smuzhiyun ret = ioctl(vm->fd, KVM_GET_DIRTY_LOG, &args);
314*4882a593Smuzhiyun TEST_ASSERT(ret == 0, "%s: KVM_GET_DIRTY_LOG failed: %s",
315*4882a593Smuzhiyun __func__, strerror(-ret));
316*4882a593Smuzhiyun }
317*4882a593Smuzhiyun
kvm_vm_clear_dirty_log(struct kvm_vm * vm,int slot,void * log,uint64_t first_page,uint32_t num_pages)318*4882a593Smuzhiyun void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
319*4882a593Smuzhiyun uint64_t first_page, uint32_t num_pages)
320*4882a593Smuzhiyun {
321*4882a593Smuzhiyun struct kvm_clear_dirty_log args = { .dirty_bitmap = log, .slot = slot,
322*4882a593Smuzhiyun .first_page = first_page,
323*4882a593Smuzhiyun .num_pages = num_pages };
324*4882a593Smuzhiyun int ret;
325*4882a593Smuzhiyun
326*4882a593Smuzhiyun ret = ioctl(vm->fd, KVM_CLEAR_DIRTY_LOG, &args);
327*4882a593Smuzhiyun TEST_ASSERT(ret == 0, "%s: KVM_CLEAR_DIRTY_LOG failed: %s",
328*4882a593Smuzhiyun __func__, strerror(-ret));
329*4882a593Smuzhiyun }
330*4882a593Smuzhiyun
331*4882a593Smuzhiyun /*
332*4882a593Smuzhiyun * Userspace Memory Region Find
333*4882a593Smuzhiyun *
334*4882a593Smuzhiyun * Input Args:
335*4882a593Smuzhiyun * vm - Virtual Machine
336*4882a593Smuzhiyun * start - Starting VM physical address
337*4882a593Smuzhiyun * end - Ending VM physical address, inclusive.
338*4882a593Smuzhiyun *
339*4882a593Smuzhiyun * Output Args: None
340*4882a593Smuzhiyun *
341*4882a593Smuzhiyun * Return:
342*4882a593Smuzhiyun * Pointer to overlapping region, NULL if no such region.
343*4882a593Smuzhiyun *
344*4882a593Smuzhiyun * Searches for a region with any physical memory that overlaps with
345*4882a593Smuzhiyun * any portion of the guest physical addresses from start to end
346*4882a593Smuzhiyun * inclusive. If multiple overlapping regions exist, a pointer to any
347*4882a593Smuzhiyun * of the regions is returned. Null is returned only when no overlapping
348*4882a593Smuzhiyun * region exists.
349*4882a593Smuzhiyun */
350*4882a593Smuzhiyun static struct userspace_mem_region *
userspace_mem_region_find(struct kvm_vm * vm,uint64_t start,uint64_t end)351*4882a593Smuzhiyun userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
352*4882a593Smuzhiyun {
353*4882a593Smuzhiyun struct userspace_mem_region *region;
354*4882a593Smuzhiyun
355*4882a593Smuzhiyun list_for_each_entry(region, &vm->userspace_mem_regions, list) {
356*4882a593Smuzhiyun uint64_t existing_start = region->region.guest_phys_addr;
357*4882a593Smuzhiyun uint64_t existing_end = region->region.guest_phys_addr
358*4882a593Smuzhiyun + region->region.memory_size - 1;
359*4882a593Smuzhiyun if (start <= existing_end && end >= existing_start)
360*4882a593Smuzhiyun return region;
361*4882a593Smuzhiyun }
362*4882a593Smuzhiyun
363*4882a593Smuzhiyun return NULL;
364*4882a593Smuzhiyun }
365*4882a593Smuzhiyun
366*4882a593Smuzhiyun /*
367*4882a593Smuzhiyun * KVM Userspace Memory Region Find
368*4882a593Smuzhiyun *
369*4882a593Smuzhiyun * Input Args:
370*4882a593Smuzhiyun * vm - Virtual Machine
371*4882a593Smuzhiyun * start - Starting VM physical address
372*4882a593Smuzhiyun * end - Ending VM physical address, inclusive.
373*4882a593Smuzhiyun *
374*4882a593Smuzhiyun * Output Args: None
375*4882a593Smuzhiyun *
376*4882a593Smuzhiyun * Return:
377*4882a593Smuzhiyun * Pointer to overlapping region, NULL if no such region.
378*4882a593Smuzhiyun *
379*4882a593Smuzhiyun * Public interface to userspace_mem_region_find. Allows tests to look up
380*4882a593Smuzhiyun * the memslot datastructure for a given range of guest physical memory.
381*4882a593Smuzhiyun */
382*4882a593Smuzhiyun struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm * vm,uint64_t start,uint64_t end)383*4882a593Smuzhiyun kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
384*4882a593Smuzhiyun uint64_t end)
385*4882a593Smuzhiyun {
386*4882a593Smuzhiyun struct userspace_mem_region *region;
387*4882a593Smuzhiyun
388*4882a593Smuzhiyun region = userspace_mem_region_find(vm, start, end);
389*4882a593Smuzhiyun if (!region)
390*4882a593Smuzhiyun return NULL;
391*4882a593Smuzhiyun
392*4882a593Smuzhiyun return ®ion->region;
393*4882a593Smuzhiyun }
394*4882a593Smuzhiyun
395*4882a593Smuzhiyun /*
396*4882a593Smuzhiyun * VCPU Find
397*4882a593Smuzhiyun *
398*4882a593Smuzhiyun * Input Args:
399*4882a593Smuzhiyun * vm - Virtual Machine
400*4882a593Smuzhiyun * vcpuid - VCPU ID
401*4882a593Smuzhiyun *
402*4882a593Smuzhiyun * Output Args: None
403*4882a593Smuzhiyun *
404*4882a593Smuzhiyun * Return:
405*4882a593Smuzhiyun * Pointer to VCPU structure
406*4882a593Smuzhiyun *
407*4882a593Smuzhiyun * Locates a vcpu structure that describes the VCPU specified by vcpuid and
408*4882a593Smuzhiyun * returns a pointer to it. Returns NULL if the VM doesn't contain a VCPU
409*4882a593Smuzhiyun * for the specified vcpuid.
410*4882a593Smuzhiyun */
vcpu_find(struct kvm_vm * vm,uint32_t vcpuid)411*4882a593Smuzhiyun struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid)
412*4882a593Smuzhiyun {
413*4882a593Smuzhiyun struct vcpu *vcpu;
414*4882a593Smuzhiyun
415*4882a593Smuzhiyun list_for_each_entry(vcpu, &vm->vcpus, list) {
416*4882a593Smuzhiyun if (vcpu->id == vcpuid)
417*4882a593Smuzhiyun return vcpu;
418*4882a593Smuzhiyun }
419*4882a593Smuzhiyun
420*4882a593Smuzhiyun return NULL;
421*4882a593Smuzhiyun }
422*4882a593Smuzhiyun
423*4882a593Smuzhiyun /*
424*4882a593Smuzhiyun * VM VCPU Remove
425*4882a593Smuzhiyun *
426*4882a593Smuzhiyun * Input Args:
427*4882a593Smuzhiyun * vcpu - VCPU to remove
428*4882a593Smuzhiyun *
429*4882a593Smuzhiyun * Output Args: None
430*4882a593Smuzhiyun *
431*4882a593Smuzhiyun * Return: None, TEST_ASSERT failures for all error conditions
432*4882a593Smuzhiyun *
433*4882a593Smuzhiyun * Removes a vCPU from a VM and frees its resources.
434*4882a593Smuzhiyun */
vm_vcpu_rm(struct vcpu * vcpu)435*4882a593Smuzhiyun static void vm_vcpu_rm(struct vcpu *vcpu)
436*4882a593Smuzhiyun {
437*4882a593Smuzhiyun int ret;
438*4882a593Smuzhiyun
439*4882a593Smuzhiyun ret = munmap(vcpu->state, sizeof(*vcpu->state));
440*4882a593Smuzhiyun TEST_ASSERT(ret == 0, "munmap of VCPU fd failed, rc: %i "
441*4882a593Smuzhiyun "errno: %i", ret, errno);
442*4882a593Smuzhiyun close(vcpu->fd);
443*4882a593Smuzhiyun TEST_ASSERT(ret == 0, "Close of VCPU fd failed, rc: %i "
444*4882a593Smuzhiyun "errno: %i", ret, errno);
445*4882a593Smuzhiyun
446*4882a593Smuzhiyun list_del(&vcpu->list);
447*4882a593Smuzhiyun free(vcpu);
448*4882a593Smuzhiyun }
449*4882a593Smuzhiyun
kvm_vm_release(struct kvm_vm * vmp)450*4882a593Smuzhiyun void kvm_vm_release(struct kvm_vm *vmp)
451*4882a593Smuzhiyun {
452*4882a593Smuzhiyun struct vcpu *vcpu, *tmp;
453*4882a593Smuzhiyun int ret;
454*4882a593Smuzhiyun
455*4882a593Smuzhiyun list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
456*4882a593Smuzhiyun vm_vcpu_rm(vcpu);
457*4882a593Smuzhiyun
458*4882a593Smuzhiyun ret = close(vmp->fd);
459*4882a593Smuzhiyun TEST_ASSERT(ret == 0, "Close of vm fd failed,\n"
460*4882a593Smuzhiyun " vmp->fd: %i rc: %i errno: %i", vmp->fd, ret, errno);
461*4882a593Smuzhiyun
462*4882a593Smuzhiyun close(vmp->kvm_fd);
463*4882a593Smuzhiyun TEST_ASSERT(ret == 0, "Close of /dev/kvm fd failed,\n"
464*4882a593Smuzhiyun " vmp->kvm_fd: %i rc: %i errno: %i", vmp->kvm_fd, ret, errno);
465*4882a593Smuzhiyun }
466*4882a593Smuzhiyun
__vm_mem_region_delete(struct kvm_vm * vm,struct userspace_mem_region * region)467*4882a593Smuzhiyun static void __vm_mem_region_delete(struct kvm_vm *vm,
468*4882a593Smuzhiyun struct userspace_mem_region *region)
469*4882a593Smuzhiyun {
470*4882a593Smuzhiyun int ret;
471*4882a593Smuzhiyun
472*4882a593Smuzhiyun list_del(®ion->list);
473*4882a593Smuzhiyun
474*4882a593Smuzhiyun region->region.memory_size = 0;
475*4882a593Smuzhiyun ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, ®ion->region);
476*4882a593Smuzhiyun TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed, "
477*4882a593Smuzhiyun "rc: %i errno: %i", ret, errno);
478*4882a593Smuzhiyun
479*4882a593Smuzhiyun sparsebit_free(®ion->unused_phy_pages);
480*4882a593Smuzhiyun ret = munmap(region->mmap_start, region->mmap_size);
481*4882a593Smuzhiyun TEST_ASSERT(ret == 0, "munmap failed, rc: %i errno: %i", ret, errno);
482*4882a593Smuzhiyun
483*4882a593Smuzhiyun free(region);
484*4882a593Smuzhiyun }
485*4882a593Smuzhiyun
486*4882a593Smuzhiyun /*
487*4882a593Smuzhiyun * Destroys and frees the VM pointed to by vmp.
488*4882a593Smuzhiyun */
kvm_vm_free(struct kvm_vm * vmp)489*4882a593Smuzhiyun void kvm_vm_free(struct kvm_vm *vmp)
490*4882a593Smuzhiyun {
491*4882a593Smuzhiyun struct userspace_mem_region *region, *tmp;
492*4882a593Smuzhiyun
493*4882a593Smuzhiyun if (vmp == NULL)
494*4882a593Smuzhiyun return;
495*4882a593Smuzhiyun
496*4882a593Smuzhiyun /* Free userspace_mem_regions. */
497*4882a593Smuzhiyun list_for_each_entry_safe(region, tmp, &vmp->userspace_mem_regions, list)
498*4882a593Smuzhiyun __vm_mem_region_delete(vmp, region);
499*4882a593Smuzhiyun
500*4882a593Smuzhiyun /* Free sparsebit arrays. */
501*4882a593Smuzhiyun sparsebit_free(&vmp->vpages_valid);
502*4882a593Smuzhiyun sparsebit_free(&vmp->vpages_mapped);
503*4882a593Smuzhiyun
504*4882a593Smuzhiyun kvm_vm_release(vmp);
505*4882a593Smuzhiyun
506*4882a593Smuzhiyun /* Free the structure describing the VM. */
507*4882a593Smuzhiyun free(vmp);
508*4882a593Smuzhiyun }
509*4882a593Smuzhiyun
510*4882a593Smuzhiyun /*
511*4882a593Smuzhiyun * Memory Compare, host virtual to guest virtual
512*4882a593Smuzhiyun *
513*4882a593Smuzhiyun * Input Args:
514*4882a593Smuzhiyun * hva - Starting host virtual address
515*4882a593Smuzhiyun * vm - Virtual Machine
516*4882a593Smuzhiyun * gva - Starting guest virtual address
517*4882a593Smuzhiyun * len - number of bytes to compare
518*4882a593Smuzhiyun *
519*4882a593Smuzhiyun * Output Args: None
520*4882a593Smuzhiyun *
521*4882a593Smuzhiyun * Input/Output Args: None
522*4882a593Smuzhiyun *
523*4882a593Smuzhiyun * Return:
524*4882a593Smuzhiyun * Returns 0 if the bytes starting at hva for a length of len
525*4882a593Smuzhiyun * are equal the guest virtual bytes starting at gva. Returns
526*4882a593Smuzhiyun * a value < 0, if bytes at hva are less than those at gva.
527*4882a593Smuzhiyun * Otherwise a value > 0 is returned.
528*4882a593Smuzhiyun *
529*4882a593Smuzhiyun * Compares the bytes starting at the host virtual address hva, for
530*4882a593Smuzhiyun * a length of len, to the guest bytes starting at the guest virtual
531*4882a593Smuzhiyun * address given by gva.
532*4882a593Smuzhiyun */
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
{
	size_t amt;

	/*
	 * Compare a batch of bytes until either a match is found
	 * or all the bytes have been compared.
	 */
	for (uintptr_t offset = 0; offset < len; offset += amt) {
		uintptr_t ptr1 = (uintptr_t)hva + offset;

		/*
		 * Determine host address for guest virtual address
		 * at offset.
		 */
		uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);

		/*
		 * Determine amount to compare on this pass.
		 * Don't allow the comparison to cross a page boundary,
		 * since gva-contiguous pages need not be hva-contiguous:
		 * clamp amt so that neither ptr1 nor ptr2 spans a page.
		 */
		amt = len - offset;
		if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift))
			amt = vm->page_size - (ptr1 % vm->page_size);
		if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift))
			amt = vm->page_size - (ptr2 % vm->page_size);

		/* Both spans must now lie entirely within one page each. */
		assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));
		assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));

		/*
		 * Perform the comparison. If there is a difference
		 * return that result to the caller, otherwise need
		 * to continue on looking for a mismatch.
		 */
		int ret = memcmp((void *)ptr1, (void *)ptr2, amt);
		if (ret != 0)
			return ret;
	}

	/*
	 * No mismatch found. Let the caller know the two memory
	 * areas are equal.
	 */
	return 0;
}
579*4882a593Smuzhiyun
580*4882a593Smuzhiyun /*
581*4882a593Smuzhiyun * VM Userspace Memory Region Add
582*4882a593Smuzhiyun *
583*4882a593Smuzhiyun * Input Args:
584*4882a593Smuzhiyun * vm - Virtual Machine
585*4882a593Smuzhiyun * backing_src - Storage source for this region.
586*4882a593Smuzhiyun * NULL to use anonymous memory.
587*4882a593Smuzhiyun * guest_paddr - Starting guest physical address
588*4882a593Smuzhiyun * slot - KVM region slot
589*4882a593Smuzhiyun * npages - Number of physical pages
590*4882a593Smuzhiyun * flags - KVM memory region flags (e.g. KVM_MEM_LOG_DIRTY_PAGES)
591*4882a593Smuzhiyun *
592*4882a593Smuzhiyun * Output Args: None
593*4882a593Smuzhiyun *
594*4882a593Smuzhiyun * Return: None
595*4882a593Smuzhiyun *
596*4882a593Smuzhiyun * Allocates a memory area of the number of pages specified by npages
597*4882a593Smuzhiyun * and maps it to the VM specified by vm, at a starting physical address
598*4882a593Smuzhiyun * given by guest_paddr. The region is created with a KVM region slot
599*4882a593Smuzhiyun * given by slot, which must be unique and < KVM_MEM_SLOTS_NUM. The
600*4882a593Smuzhiyun * region is created with the flags given by flags.
601*4882a593Smuzhiyun */
vm_userspace_mem_region_add(struct kvm_vm * vm,enum vm_mem_backing_src_type src_type,uint64_t guest_paddr,uint32_t slot,uint64_t npages,uint32_t flags)602*4882a593Smuzhiyun void vm_userspace_mem_region_add(struct kvm_vm *vm,
603*4882a593Smuzhiyun enum vm_mem_backing_src_type src_type,
604*4882a593Smuzhiyun uint64_t guest_paddr, uint32_t slot, uint64_t npages,
605*4882a593Smuzhiyun uint32_t flags)
606*4882a593Smuzhiyun {
607*4882a593Smuzhiyun int ret;
608*4882a593Smuzhiyun struct userspace_mem_region *region;
609*4882a593Smuzhiyun size_t huge_page_size = KVM_UTIL_PGS_PER_HUGEPG * vm->page_size;
610*4882a593Smuzhiyun size_t alignment;
611*4882a593Smuzhiyun
612*4882a593Smuzhiyun TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages,
613*4882a593Smuzhiyun "Number of guest pages is not compatible with the host. "
614*4882a593Smuzhiyun "Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages));
615*4882a593Smuzhiyun
616*4882a593Smuzhiyun TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
617*4882a593Smuzhiyun "address not on a page boundary.\n"
618*4882a593Smuzhiyun " guest_paddr: 0x%lx vm->page_size: 0x%x",
619*4882a593Smuzhiyun guest_paddr, vm->page_size);
620*4882a593Smuzhiyun TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1)
621*4882a593Smuzhiyun <= vm->max_gfn, "Physical range beyond maximum "
622*4882a593Smuzhiyun "supported physical address,\n"
623*4882a593Smuzhiyun " guest_paddr: 0x%lx npages: 0x%lx\n"
624*4882a593Smuzhiyun " vm->max_gfn: 0x%lx vm->page_size: 0x%x",
625*4882a593Smuzhiyun guest_paddr, npages, vm->max_gfn, vm->page_size);
626*4882a593Smuzhiyun
627*4882a593Smuzhiyun /*
628*4882a593Smuzhiyun * Confirm a mem region with an overlapping address doesn't
629*4882a593Smuzhiyun * already exist.
630*4882a593Smuzhiyun */
631*4882a593Smuzhiyun region = (struct userspace_mem_region *) userspace_mem_region_find(
632*4882a593Smuzhiyun vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
633*4882a593Smuzhiyun if (region != NULL)
634*4882a593Smuzhiyun TEST_FAIL("overlapping userspace_mem_region already "
635*4882a593Smuzhiyun "exists\n"
636*4882a593Smuzhiyun " requested guest_paddr: 0x%lx npages: 0x%lx "
637*4882a593Smuzhiyun "page_size: 0x%x\n"
638*4882a593Smuzhiyun " existing guest_paddr: 0x%lx size: 0x%lx",
639*4882a593Smuzhiyun guest_paddr, npages, vm->page_size,
640*4882a593Smuzhiyun (uint64_t) region->region.guest_phys_addr,
641*4882a593Smuzhiyun (uint64_t) region->region.memory_size);
642*4882a593Smuzhiyun
643*4882a593Smuzhiyun /* Confirm no region with the requested slot already exists. */
644*4882a593Smuzhiyun list_for_each_entry(region, &vm->userspace_mem_regions, list) {
645*4882a593Smuzhiyun if (region->region.slot != slot)
646*4882a593Smuzhiyun continue;
647*4882a593Smuzhiyun
648*4882a593Smuzhiyun TEST_FAIL("A mem region with the requested slot "
649*4882a593Smuzhiyun "already exists.\n"
650*4882a593Smuzhiyun " requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
651*4882a593Smuzhiyun " existing slot: %u paddr: 0x%lx size: 0x%lx",
652*4882a593Smuzhiyun slot, guest_paddr, npages,
653*4882a593Smuzhiyun region->region.slot,
654*4882a593Smuzhiyun (uint64_t) region->region.guest_phys_addr,
655*4882a593Smuzhiyun (uint64_t) region->region.memory_size);
656*4882a593Smuzhiyun }
657*4882a593Smuzhiyun
658*4882a593Smuzhiyun /* Allocate and initialize new mem region structure. */
659*4882a593Smuzhiyun region = calloc(1, sizeof(*region));
660*4882a593Smuzhiyun TEST_ASSERT(region != NULL, "Insufficient Memory");
661*4882a593Smuzhiyun region->mmap_size = npages * vm->page_size;
662*4882a593Smuzhiyun
663*4882a593Smuzhiyun #ifdef __s390x__
664*4882a593Smuzhiyun /* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
665*4882a593Smuzhiyun alignment = 0x100000;
666*4882a593Smuzhiyun #else
667*4882a593Smuzhiyun alignment = 1;
668*4882a593Smuzhiyun #endif
669*4882a593Smuzhiyun
670*4882a593Smuzhiyun if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
671*4882a593Smuzhiyun alignment = max(huge_page_size, alignment);
672*4882a593Smuzhiyun
673*4882a593Smuzhiyun /* Add enough memory to align up if necessary */
674*4882a593Smuzhiyun if (alignment > 1)
675*4882a593Smuzhiyun region->mmap_size += alignment;
676*4882a593Smuzhiyun
677*4882a593Smuzhiyun region->mmap_start = mmap(NULL, region->mmap_size,
678*4882a593Smuzhiyun PROT_READ | PROT_WRITE,
679*4882a593Smuzhiyun MAP_PRIVATE | MAP_ANONYMOUS
680*4882a593Smuzhiyun | (src_type == VM_MEM_SRC_ANONYMOUS_HUGETLB ? MAP_HUGETLB : 0),
681*4882a593Smuzhiyun -1, 0);
682*4882a593Smuzhiyun TEST_ASSERT(region->mmap_start != MAP_FAILED,
683*4882a593Smuzhiyun "test_malloc failed, mmap_start: %p errno: %i",
684*4882a593Smuzhiyun region->mmap_start, errno);
685*4882a593Smuzhiyun
686*4882a593Smuzhiyun /* Align host address */
687*4882a593Smuzhiyun region->host_mem = align(region->mmap_start, alignment);
688*4882a593Smuzhiyun
689*4882a593Smuzhiyun /* As needed perform madvise */
690*4882a593Smuzhiyun if (src_type == VM_MEM_SRC_ANONYMOUS || src_type == VM_MEM_SRC_ANONYMOUS_THP) {
691*4882a593Smuzhiyun struct stat statbuf;
692*4882a593Smuzhiyun
693*4882a593Smuzhiyun ret = stat("/sys/kernel/mm/transparent_hugepage", &statbuf);
694*4882a593Smuzhiyun TEST_ASSERT(ret == 0 || (ret == -1 && errno == ENOENT),
695*4882a593Smuzhiyun "stat /sys/kernel/mm/transparent_hugepage");
696*4882a593Smuzhiyun
697*4882a593Smuzhiyun TEST_ASSERT(ret == 0 || src_type != VM_MEM_SRC_ANONYMOUS_THP,
698*4882a593Smuzhiyun "VM_MEM_SRC_ANONYMOUS_THP requires THP to be configured in the host kernel");
699*4882a593Smuzhiyun
700*4882a593Smuzhiyun if (ret == 0) {
701*4882a593Smuzhiyun ret = madvise(region->host_mem, npages * vm->page_size,
702*4882a593Smuzhiyun src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE);
703*4882a593Smuzhiyun TEST_ASSERT(ret == 0, "madvise failed, addr: %p length: 0x%lx src_type: %x",
704*4882a593Smuzhiyun region->host_mem, npages * vm->page_size, src_type);
705*4882a593Smuzhiyun }
706*4882a593Smuzhiyun }
707*4882a593Smuzhiyun
708*4882a593Smuzhiyun region->unused_phy_pages = sparsebit_alloc();
709*4882a593Smuzhiyun sparsebit_set_num(region->unused_phy_pages,
710*4882a593Smuzhiyun guest_paddr >> vm->page_shift, npages);
711*4882a593Smuzhiyun region->region.slot = slot;
712*4882a593Smuzhiyun region->region.flags = flags;
713*4882a593Smuzhiyun region->region.guest_phys_addr = guest_paddr;
714*4882a593Smuzhiyun region->region.memory_size = npages * vm->page_size;
715*4882a593Smuzhiyun region->region.userspace_addr = (uintptr_t) region->host_mem;
716*4882a593Smuzhiyun ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, ®ion->region);
717*4882a593Smuzhiyun TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
718*4882a593Smuzhiyun " rc: %i errno: %i\n"
719*4882a593Smuzhiyun " slot: %u flags: 0x%x\n"
720*4882a593Smuzhiyun " guest_phys_addr: 0x%lx size: 0x%lx",
721*4882a593Smuzhiyun ret, errno, slot, flags,
722*4882a593Smuzhiyun guest_paddr, (uint64_t) region->region.memory_size);
723*4882a593Smuzhiyun
724*4882a593Smuzhiyun /* Add to linked-list of memory regions. */
725*4882a593Smuzhiyun list_add(®ion->list, &vm->userspace_mem_regions);
726*4882a593Smuzhiyun }
727*4882a593Smuzhiyun
728*4882a593Smuzhiyun /*
729*4882a593Smuzhiyun * Memslot to region
730*4882a593Smuzhiyun *
731*4882a593Smuzhiyun * Input Args:
732*4882a593Smuzhiyun * vm - Virtual Machine
733*4882a593Smuzhiyun * memslot - KVM memory slot ID
734*4882a593Smuzhiyun *
735*4882a593Smuzhiyun * Output Args: None
736*4882a593Smuzhiyun *
737*4882a593Smuzhiyun * Return:
738*4882a593Smuzhiyun * Pointer to memory region structure that describe memory region
739*4882a593Smuzhiyun * using kvm memory slot ID given by memslot. TEST_ASSERT failure
740*4882a593Smuzhiyun * on error (e.g. currently no memory region using memslot as a KVM
741*4882a593Smuzhiyun * memory slot ID).
742*4882a593Smuzhiyun */
743*4882a593Smuzhiyun struct userspace_mem_region *
memslot2region(struct kvm_vm * vm,uint32_t memslot)744*4882a593Smuzhiyun memslot2region(struct kvm_vm *vm, uint32_t memslot)
745*4882a593Smuzhiyun {
746*4882a593Smuzhiyun struct userspace_mem_region *region;
747*4882a593Smuzhiyun
748*4882a593Smuzhiyun list_for_each_entry(region, &vm->userspace_mem_regions, list) {
749*4882a593Smuzhiyun if (region->region.slot == memslot)
750*4882a593Smuzhiyun return region;
751*4882a593Smuzhiyun }
752*4882a593Smuzhiyun
753*4882a593Smuzhiyun fprintf(stderr, "No mem region with the requested slot found,\n"
754*4882a593Smuzhiyun " requested slot: %u\n", memslot);
755*4882a593Smuzhiyun fputs("---- vm dump ----\n", stderr);
756*4882a593Smuzhiyun vm_dump(stderr, vm, 2);
757*4882a593Smuzhiyun TEST_FAIL("Mem region not found");
758*4882a593Smuzhiyun return NULL;
759*4882a593Smuzhiyun }
760*4882a593Smuzhiyun
761*4882a593Smuzhiyun /*
762*4882a593Smuzhiyun * VM Memory Region Flags Set
763*4882a593Smuzhiyun *
764*4882a593Smuzhiyun * Input Args:
765*4882a593Smuzhiyun * vm - Virtual Machine
766*4882a593Smuzhiyun * flags - Starting guest physical address
767*4882a593Smuzhiyun *
768*4882a593Smuzhiyun * Output Args: None
769*4882a593Smuzhiyun *
770*4882a593Smuzhiyun * Return: None
771*4882a593Smuzhiyun *
772*4882a593Smuzhiyun * Sets the flags of the memory region specified by the value of slot,
773*4882a593Smuzhiyun * to the values given by flags.
774*4882a593Smuzhiyun */
vm_mem_region_set_flags(struct kvm_vm * vm,uint32_t slot,uint32_t flags)775*4882a593Smuzhiyun void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
776*4882a593Smuzhiyun {
777*4882a593Smuzhiyun int ret;
778*4882a593Smuzhiyun struct userspace_mem_region *region;
779*4882a593Smuzhiyun
780*4882a593Smuzhiyun region = memslot2region(vm, slot);
781*4882a593Smuzhiyun
782*4882a593Smuzhiyun region->region.flags = flags;
783*4882a593Smuzhiyun
784*4882a593Smuzhiyun ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, ®ion->region);
785*4882a593Smuzhiyun
786*4882a593Smuzhiyun TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
787*4882a593Smuzhiyun " rc: %i errno: %i slot: %u flags: 0x%x",
788*4882a593Smuzhiyun ret, errno, slot, flags);
789*4882a593Smuzhiyun }
790*4882a593Smuzhiyun
791*4882a593Smuzhiyun /*
792*4882a593Smuzhiyun * VM Memory Region Move
793*4882a593Smuzhiyun *
794*4882a593Smuzhiyun * Input Args:
795*4882a593Smuzhiyun * vm - Virtual Machine
796*4882a593Smuzhiyun * slot - Slot of the memory region to move
797*4882a593Smuzhiyun * new_gpa - Starting guest physical address
798*4882a593Smuzhiyun *
799*4882a593Smuzhiyun * Output Args: None
800*4882a593Smuzhiyun *
801*4882a593Smuzhiyun * Return: None
802*4882a593Smuzhiyun *
803*4882a593Smuzhiyun * Change the gpa of a memory region.
804*4882a593Smuzhiyun */
vm_mem_region_move(struct kvm_vm * vm,uint32_t slot,uint64_t new_gpa)805*4882a593Smuzhiyun void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
806*4882a593Smuzhiyun {
807*4882a593Smuzhiyun struct userspace_mem_region *region;
808*4882a593Smuzhiyun int ret;
809*4882a593Smuzhiyun
810*4882a593Smuzhiyun region = memslot2region(vm, slot);
811*4882a593Smuzhiyun
812*4882a593Smuzhiyun region->region.guest_phys_addr = new_gpa;
813*4882a593Smuzhiyun
814*4882a593Smuzhiyun ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, ®ion->region);
815*4882a593Smuzhiyun
816*4882a593Smuzhiyun TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed\n"
817*4882a593Smuzhiyun "ret: %i errno: %i slot: %u new_gpa: 0x%lx",
818*4882a593Smuzhiyun ret, errno, slot, new_gpa);
819*4882a593Smuzhiyun }
820*4882a593Smuzhiyun
821*4882a593Smuzhiyun /*
822*4882a593Smuzhiyun * VM Memory Region Delete
823*4882a593Smuzhiyun *
824*4882a593Smuzhiyun * Input Args:
825*4882a593Smuzhiyun * vm - Virtual Machine
826*4882a593Smuzhiyun * slot - Slot of the memory region to delete
827*4882a593Smuzhiyun *
828*4882a593Smuzhiyun * Output Args: None
829*4882a593Smuzhiyun *
830*4882a593Smuzhiyun * Return: None
831*4882a593Smuzhiyun *
832*4882a593Smuzhiyun * Delete a memory region.
833*4882a593Smuzhiyun */
/*
 * VM Memory Region Delete
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to delete
 *
 * Output Args: None
 *
 * Return: None
 *
 * Looks up the region for slot (asserting it exists) and deletes it.
 */
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
{
	struct userspace_mem_region *region = memslot2region(vm, slot);

	__vm_mem_region_delete(vm, region);
}
838*4882a593Smuzhiyun
839*4882a593Smuzhiyun /*
840*4882a593Smuzhiyun * VCPU mmap Size
841*4882a593Smuzhiyun *
842*4882a593Smuzhiyun * Input Args: None
843*4882a593Smuzhiyun *
844*4882a593Smuzhiyun * Output Args: None
845*4882a593Smuzhiyun *
846*4882a593Smuzhiyun * Return:
847*4882a593Smuzhiyun * Size of VCPU state
848*4882a593Smuzhiyun *
849*4882a593Smuzhiyun * Returns the size of the structure pointed to by the return value
850*4882a593Smuzhiyun * of vcpu_state().
851*4882a593Smuzhiyun */
vcpu_mmap_sz(void)852*4882a593Smuzhiyun static int vcpu_mmap_sz(void)
853*4882a593Smuzhiyun {
854*4882a593Smuzhiyun int dev_fd, ret;
855*4882a593Smuzhiyun
856*4882a593Smuzhiyun dev_fd = open(KVM_DEV_PATH, O_RDONLY);
857*4882a593Smuzhiyun if (dev_fd < 0)
858*4882a593Smuzhiyun exit(KSFT_SKIP);
859*4882a593Smuzhiyun
860*4882a593Smuzhiyun ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
861*4882a593Smuzhiyun TEST_ASSERT(ret >= sizeof(struct kvm_run),
862*4882a593Smuzhiyun "%s KVM_GET_VCPU_MMAP_SIZE ioctl failed, rc: %i errno: %i",
863*4882a593Smuzhiyun __func__, ret, errno);
864*4882a593Smuzhiyun
865*4882a593Smuzhiyun close(dev_fd);
866*4882a593Smuzhiyun
867*4882a593Smuzhiyun return ret;
868*4882a593Smuzhiyun }
869*4882a593Smuzhiyun
870*4882a593Smuzhiyun /*
871*4882a593Smuzhiyun * VM VCPU Add
872*4882a593Smuzhiyun *
873*4882a593Smuzhiyun * Input Args:
874*4882a593Smuzhiyun * vm - Virtual Machine
875*4882a593Smuzhiyun * vcpuid - VCPU ID
876*4882a593Smuzhiyun *
877*4882a593Smuzhiyun * Output Args: None
878*4882a593Smuzhiyun *
879*4882a593Smuzhiyun * Return: None
880*4882a593Smuzhiyun *
881*4882a593Smuzhiyun * Adds a virtual CPU to the VM specified by vm with the ID given by vcpuid.
882*4882a593Smuzhiyun * No additional VCPU setup is done.
883*4882a593Smuzhiyun */
vm_vcpu_add(struct kvm_vm * vm,uint32_t vcpuid)884*4882a593Smuzhiyun void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid)
885*4882a593Smuzhiyun {
886*4882a593Smuzhiyun struct vcpu *vcpu;
887*4882a593Smuzhiyun
888*4882a593Smuzhiyun /* Confirm a vcpu with the specified id doesn't already exist. */
889*4882a593Smuzhiyun vcpu = vcpu_find(vm, vcpuid);
890*4882a593Smuzhiyun if (vcpu != NULL)
891*4882a593Smuzhiyun TEST_FAIL("vcpu with the specified id "
892*4882a593Smuzhiyun "already exists,\n"
893*4882a593Smuzhiyun " requested vcpuid: %u\n"
894*4882a593Smuzhiyun " existing vcpuid: %u state: %p",
895*4882a593Smuzhiyun vcpuid, vcpu->id, vcpu->state);
896*4882a593Smuzhiyun
897*4882a593Smuzhiyun /* Allocate and initialize new vcpu structure. */
898*4882a593Smuzhiyun vcpu = calloc(1, sizeof(*vcpu));
899*4882a593Smuzhiyun TEST_ASSERT(vcpu != NULL, "Insufficient Memory");
900*4882a593Smuzhiyun vcpu->id = vcpuid;
901*4882a593Smuzhiyun vcpu->fd = ioctl(vm->fd, KVM_CREATE_VCPU, vcpuid);
902*4882a593Smuzhiyun TEST_ASSERT(vcpu->fd >= 0, "KVM_CREATE_VCPU failed, rc: %i errno: %i",
903*4882a593Smuzhiyun vcpu->fd, errno);
904*4882a593Smuzhiyun
905*4882a593Smuzhiyun TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->state), "vcpu mmap size "
906*4882a593Smuzhiyun "smaller than expected, vcpu_mmap_sz: %i expected_min: %zi",
907*4882a593Smuzhiyun vcpu_mmap_sz(), sizeof(*vcpu->state));
908*4882a593Smuzhiyun vcpu->state = (struct kvm_run *) mmap(NULL, sizeof(*vcpu->state),
909*4882a593Smuzhiyun PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
910*4882a593Smuzhiyun TEST_ASSERT(vcpu->state != MAP_FAILED, "mmap vcpu_state failed, "
911*4882a593Smuzhiyun "vcpu id: %u errno: %i", vcpuid, errno);
912*4882a593Smuzhiyun
913*4882a593Smuzhiyun /* Add to linked-list of VCPUs. */
914*4882a593Smuzhiyun list_add(&vcpu->list, &vm->vcpus);
915*4882a593Smuzhiyun }
916*4882a593Smuzhiyun
917*4882a593Smuzhiyun /*
918*4882a593Smuzhiyun * VM Virtual Address Unused Gap
919*4882a593Smuzhiyun *
920*4882a593Smuzhiyun * Input Args:
921*4882a593Smuzhiyun * vm - Virtual Machine
922*4882a593Smuzhiyun * sz - Size (bytes)
923*4882a593Smuzhiyun * vaddr_min - Minimum Virtual Address
924*4882a593Smuzhiyun *
925*4882a593Smuzhiyun * Output Args: None
926*4882a593Smuzhiyun *
927*4882a593Smuzhiyun * Return:
928*4882a593Smuzhiyun * Lowest virtual address at or below vaddr_min, with at least
929*4882a593Smuzhiyun * sz unused bytes. TEST_ASSERT failure if no area of at least
930*4882a593Smuzhiyun * size sz is available.
931*4882a593Smuzhiyun *
932*4882a593Smuzhiyun * Within the VM specified by vm, locates the lowest starting virtual
933*4882a593Smuzhiyun * address >= vaddr_min, that has at least sz unallocated bytes. A
934*4882a593Smuzhiyun * TEST_ASSERT failure occurs for invalid input or no area of at least
935*4882a593Smuzhiyun * sz unallocated bytes >= vaddr_min is available.
936*4882a593Smuzhiyun */
/*
 * VM Virtual Address Unused Gap
 *
 * Input Args:
 *   vm - Virtual Machine
 *   sz - Size (bytes)
 *   vaddr_min - Minimum Virtual Address
 *
 * Output Args: None
 *
 * Return:
 *   Lowest virtual address at or below vaddr_min, with at least
 *   sz unused bytes.  TEST_ASSERT failure if no area of at least
 *   size sz is available.
 *
 * Within the VM specified by vm, locates the lowest starting virtual
 * address >= vaddr_min, that has at least sz unallocated bytes.  A
 * TEST_ASSERT failure occurs for invalid input or no area of at least
 * sz unallocated bytes >= vaddr_min is available.
 */
static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
				      vm_vaddr_t vaddr_min)
{
	/* Number of guest pages needed, rounding sz up to a page boundary. */
	uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;

	/* Determine lowest permitted virtual page index. */
	uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
	/* Rounding vaddr_min up wrapped around -- no usable index exists. */
	if ((pgidx_start * vm->page_size) < vaddr_min)
		goto no_va_found;

	/* Loop over section with enough valid virtual page indexes. */
	if (!sparsebit_is_set_num(vm->vpages_valid,
		pgidx_start, pages))
		pgidx_start = sparsebit_next_set_num(vm->vpages_valid,
			pgidx_start, pages);
	do {
		/*
		 * Are there enough unused virtual pages available at
		 * the currently proposed starting virtual page index.
		 * If not, adjust proposed starting index to next
		 * possible.
		 */
		if (sparsebit_is_clear_num(vm->vpages_mapped,
			pgidx_start, pages))
			goto va_found;
		pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped,
			pgidx_start, pages);
		/* sparsebit_next_*_num() returns 0 when no further range exists. */
		if (pgidx_start == 0)
			goto no_va_found;

		/*
		 * If needed, adjust proposed starting virtual address,
		 * to next range of valid virtual addresses.
		 */
		if (!sparsebit_is_set_num(vm->vpages_valid,
			pgidx_start, pages)) {
			pgidx_start = sparsebit_next_set_num(
				vm->vpages_valid, pgidx_start, pages);
			if (pgidx_start == 0)
				goto no_va_found;
		}
	} while (pgidx_start != 0);

no_va_found:
	TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages);

	/* NOT REACHED */
	return -1;

va_found:
	/* Sanity-check the candidate range: all pages valid and none mapped. */
	TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid,
		pgidx_start, pages),
		"Unexpected, invalid virtual page index range,\n"
		"  pgidx_start: 0x%lx\n"
		"  pages: 0x%lx",
		pgidx_start, pages);
	TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped,
		pgidx_start, pages),
		"Unexpected, pages already mapped,\n"
		"  pgidx_start: 0x%lx\n"
		"  pages: 0x%lx",
		pgidx_start, pages);

	return pgidx_start * vm->page_size;
}
1002*4882a593Smuzhiyun
1003*4882a593Smuzhiyun /*
1004*4882a593Smuzhiyun * VM Virtual Address Allocate
1005*4882a593Smuzhiyun *
1006*4882a593Smuzhiyun * Input Args:
1007*4882a593Smuzhiyun * vm - Virtual Machine
1008*4882a593Smuzhiyun * sz - Size in bytes
1009*4882a593Smuzhiyun * vaddr_min - Minimum starting virtual address
1010*4882a593Smuzhiyun * data_memslot - Memory region slot for data pages
1011*4882a593Smuzhiyun * pgd_memslot - Memory region slot for new virtual translation tables
1012*4882a593Smuzhiyun *
1013*4882a593Smuzhiyun * Output Args: None
1014*4882a593Smuzhiyun *
1015*4882a593Smuzhiyun * Return:
1016*4882a593Smuzhiyun * Starting guest virtual address
1017*4882a593Smuzhiyun *
1018*4882a593Smuzhiyun * Allocates at least sz bytes within the virtual address space of the vm
1019*4882a593Smuzhiyun * given by vm. The allocated bytes are mapped to a virtual address >=
1020*4882a593Smuzhiyun * the address given by vaddr_min. Note that each allocation uses a
1021*4882a593Smuzhiyun * a unique set of pages, with the minimum real allocation being at least
1022*4882a593Smuzhiyun * a page.
1023*4882a593Smuzhiyun */
/*
 * VM Virtual Address Allocate
 *
 * Input Args:
 *   vm - Virtual Machine
 *   sz - Size in bytes
 *   vaddr_min - Minimum starting virtual address
 *   data_memslot - Memory region slot for data pages
 *   pgd_memslot - Memory region slot for new virtual translation tables
 *
 * Output Args: None
 *
 * Return:
 *   Starting guest virtual address
 *
 * Allocates at least sz bytes within the virtual address space of the vm
 * given by vm.  The allocated bytes are mapped to a virtual address >=
 * the address given by vaddr_min.  Note that each allocation uses a
 * unique set of pages, with the minimum real allocation being at least
 * a page.
 */
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
			  uint32_t data_memslot, uint32_t pgd_memslot)
{
	/* Round the byte count up to whole guest pages. */
	uint64_t npages = (sz >> vm->page_shift) + !!(sz % vm->page_size);
	vm_vaddr_t vaddr_start, vaddr;
	uint64_t i;

	virt_pgd_alloc(vm, pgd_memslot);

	/*
	 * Find an unused range of virtual page addresses of at least
	 * npages in length.
	 */
	vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);

	/* Back each virtual page with a freshly allocated physical page. */
	vaddr = vaddr_start;
	for (i = 0; i < npages; i++, vaddr += vm->page_size) {
		vm_paddr_t paddr = vm_phy_page_alloc(vm,
			KVM_UTIL_MIN_PFN * vm->page_size, data_memslot);

		virt_pg_map(vm, vaddr, paddr, pgd_memslot);

		sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
	}

	return vaddr_start;
}
1053*4882a593Smuzhiyun
1054*4882a593Smuzhiyun /*
1055*4882a593Smuzhiyun * Map a range of VM virtual address to the VM's physical address
1056*4882a593Smuzhiyun *
1057*4882a593Smuzhiyun * Input Args:
1058*4882a593Smuzhiyun * vm - Virtual Machine
1059*4882a593Smuzhiyun * vaddr - Virtuall address to map
1060*4882a593Smuzhiyun * paddr - VM Physical Address
1061*4882a593Smuzhiyun * npages - The number of pages to map
1062*4882a593Smuzhiyun * pgd_memslot - Memory region slot for new virtual translation tables
1063*4882a593Smuzhiyun *
1064*4882a593Smuzhiyun * Output Args: None
1065*4882a593Smuzhiyun *
1066*4882a593Smuzhiyun * Return: None
1067*4882a593Smuzhiyun *
1068*4882a593Smuzhiyun * Within the VM given by @vm, creates a virtual translation for
1069*4882a593Smuzhiyun * @npages starting at @vaddr to the page range starting at @paddr.
1070*4882a593Smuzhiyun */
virt_map(struct kvm_vm * vm,uint64_t vaddr,uint64_t paddr,unsigned int npages,uint32_t pgd_memslot)1071*4882a593Smuzhiyun void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
1072*4882a593Smuzhiyun unsigned int npages, uint32_t pgd_memslot)
1073*4882a593Smuzhiyun {
1074*4882a593Smuzhiyun size_t page_size = vm->page_size;
1075*4882a593Smuzhiyun size_t size = npages * page_size;
1076*4882a593Smuzhiyun
1077*4882a593Smuzhiyun TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
1078*4882a593Smuzhiyun TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
1079*4882a593Smuzhiyun
1080*4882a593Smuzhiyun while (npages--) {
1081*4882a593Smuzhiyun virt_pg_map(vm, vaddr, paddr, pgd_memslot);
1082*4882a593Smuzhiyun vaddr += page_size;
1083*4882a593Smuzhiyun paddr += page_size;
1084*4882a593Smuzhiyun }
1085*4882a593Smuzhiyun }
1086*4882a593Smuzhiyun
1087*4882a593Smuzhiyun /*
1088*4882a593Smuzhiyun * Address VM Physical to Host Virtual
1089*4882a593Smuzhiyun *
1090*4882a593Smuzhiyun * Input Args:
1091*4882a593Smuzhiyun * vm - Virtual Machine
1092*4882a593Smuzhiyun * gpa - VM physical address
1093*4882a593Smuzhiyun *
1094*4882a593Smuzhiyun * Output Args: None
1095*4882a593Smuzhiyun *
1096*4882a593Smuzhiyun * Return:
1097*4882a593Smuzhiyun * Equivalent host virtual address
1098*4882a593Smuzhiyun *
1099*4882a593Smuzhiyun * Locates the memory region containing the VM physical address given
1100*4882a593Smuzhiyun * by gpa, within the VM given by vm. When found, the host virtual
1101*4882a593Smuzhiyun * address providing the memory to the vm physical address is returned.
1102*4882a593Smuzhiyun * A TEST_ASSERT failure occurs if no region containing gpa exists.
1103*4882a593Smuzhiyun */
/*
 * Address VM Physical to Host Virtual
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gpa - VM physical address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent host virtual address
 *
 * Locates the memory region containing the VM physical address given
 * by gpa, within the VM given by vm.  When found, the host virtual
 * address providing the memory to the vm physical address is returned.
 * A TEST_ASSERT failure occurs if no region containing gpa exists.
 */
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
{
	struct userspace_mem_region *region;

	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
		uint64_t start = region->region.guest_phys_addr;
		uint64_t size = region->region.memory_size;

		/* gpa inside [start, start + size - 1]? */
		if (gpa >= start && gpa - start <= size - 1)
			return (void *) ((uintptr_t) region->host_mem
				+ (gpa - start));
	}

	TEST_FAIL("No vm physical memory at 0x%lx", gpa);
	return NULL;
}
1119*4882a593Smuzhiyun
1120*4882a593Smuzhiyun /*
1121*4882a593Smuzhiyun * Address Host Virtual to VM Physical
1122*4882a593Smuzhiyun *
1123*4882a593Smuzhiyun * Input Args:
1124*4882a593Smuzhiyun * vm - Virtual Machine
1125*4882a593Smuzhiyun * hva - Host virtual address
1126*4882a593Smuzhiyun *
1127*4882a593Smuzhiyun * Output Args: None
1128*4882a593Smuzhiyun *
1129*4882a593Smuzhiyun * Return:
1130*4882a593Smuzhiyun * Equivalent VM physical address
1131*4882a593Smuzhiyun *
1132*4882a593Smuzhiyun * Locates the memory region containing the host virtual address given
1133*4882a593Smuzhiyun * by hva, within the VM given by vm. When found, the equivalent
1134*4882a593Smuzhiyun * VM physical address is returned. A TEST_ASSERT failure occurs if no
1135*4882a593Smuzhiyun * region containing hva exists.
1136*4882a593Smuzhiyun */
addr_hva2gpa(struct kvm_vm * vm,void * hva)1137*4882a593Smuzhiyun vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
1138*4882a593Smuzhiyun {
1139*4882a593Smuzhiyun struct userspace_mem_region *region;
1140*4882a593Smuzhiyun
1141*4882a593Smuzhiyun list_for_each_entry(region, &vm->userspace_mem_regions, list) {
1142*4882a593Smuzhiyun if ((hva >= region->host_mem)
1143*4882a593Smuzhiyun && (hva <= (region->host_mem
1144*4882a593Smuzhiyun + region->region.memory_size - 1)))
1145*4882a593Smuzhiyun return (vm_paddr_t) ((uintptr_t)
1146*4882a593Smuzhiyun region->region.guest_phys_addr
1147*4882a593Smuzhiyun + (hva - (uintptr_t) region->host_mem));
1148*4882a593Smuzhiyun }
1149*4882a593Smuzhiyun
1150*4882a593Smuzhiyun TEST_FAIL("No mapping to a guest physical address, hva: %p", hva);
1151*4882a593Smuzhiyun return -1;
1152*4882a593Smuzhiyun }
1153*4882a593Smuzhiyun
1154*4882a593Smuzhiyun /*
1155*4882a593Smuzhiyun * VM Create IRQ Chip
1156*4882a593Smuzhiyun *
1157*4882a593Smuzhiyun * Input Args:
1158*4882a593Smuzhiyun * vm - Virtual Machine
1159*4882a593Smuzhiyun *
1160*4882a593Smuzhiyun * Output Args: None
1161*4882a593Smuzhiyun *
1162*4882a593Smuzhiyun * Return: None
1163*4882a593Smuzhiyun *
1164*4882a593Smuzhiyun * Creates an interrupt controller chip for the VM specified by vm.
1165*4882a593Smuzhiyun */
vm_create_irqchip(struct kvm_vm * vm)1166*4882a593Smuzhiyun void vm_create_irqchip(struct kvm_vm *vm)
1167*4882a593Smuzhiyun {
1168*4882a593Smuzhiyun int ret;
1169*4882a593Smuzhiyun
1170*4882a593Smuzhiyun ret = ioctl(vm->fd, KVM_CREATE_IRQCHIP, 0);
1171*4882a593Smuzhiyun TEST_ASSERT(ret == 0, "KVM_CREATE_IRQCHIP IOCTL failed, "
1172*4882a593Smuzhiyun "rc: %i errno: %i", ret, errno);
1173*4882a593Smuzhiyun
1174*4882a593Smuzhiyun vm->has_irqchip = true;
1175*4882a593Smuzhiyun }
1176*4882a593Smuzhiyun
1177*4882a593Smuzhiyun /*
1178*4882a593Smuzhiyun * VM VCPU State
1179*4882a593Smuzhiyun *
1180*4882a593Smuzhiyun * Input Args:
1181*4882a593Smuzhiyun * vm - Virtual Machine
1182*4882a593Smuzhiyun * vcpuid - VCPU ID
1183*4882a593Smuzhiyun *
1184*4882a593Smuzhiyun * Output Args: None
1185*4882a593Smuzhiyun *
1186*4882a593Smuzhiyun * Return:
1187*4882a593Smuzhiyun * Pointer to structure that describes the state of the VCPU.
1188*4882a593Smuzhiyun *
1189*4882a593Smuzhiyun * Locates and returns a pointer to a structure that describes the
1190*4882a593Smuzhiyun * state of the VCPU with the given vcpuid.
1191*4882a593Smuzhiyun */
vcpu_state(struct kvm_vm * vm,uint32_t vcpuid)1192*4882a593Smuzhiyun struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid)
1193*4882a593Smuzhiyun {
1194*4882a593Smuzhiyun struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1195*4882a593Smuzhiyun TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1196*4882a593Smuzhiyun
1197*4882a593Smuzhiyun return vcpu->state;
1198*4882a593Smuzhiyun }
1199*4882a593Smuzhiyun
1200*4882a593Smuzhiyun /*
1201*4882a593Smuzhiyun * VM VCPU Run
1202*4882a593Smuzhiyun *
1203*4882a593Smuzhiyun * Input Args:
1204*4882a593Smuzhiyun * vm - Virtual Machine
1205*4882a593Smuzhiyun * vcpuid - VCPU ID
1206*4882a593Smuzhiyun *
1207*4882a593Smuzhiyun * Output Args: None
1208*4882a593Smuzhiyun *
1209*4882a593Smuzhiyun * Return: None
1210*4882a593Smuzhiyun *
1211*4882a593Smuzhiyun * Switch to executing the code for the VCPU given by vcpuid, within the VM
1212*4882a593Smuzhiyun * given by vm.
1213*4882a593Smuzhiyun */
/*
 * VM VCPU Run
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args: None
 *
 * Return: None
 *
 * Runs the VCPU given by vcpuid via _vcpu_run(), asserting that
 * KVM_RUN completed successfully.
 */
void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
{
	int rc;

	rc = _vcpu_run(vm, vcpuid);
	TEST_ASSERT(rc == 0, "KVM_RUN IOCTL failed, "
		    "rc: %i errno: %i", rc, errno);
}
1220*4882a593Smuzhiyun
_vcpu_run(struct kvm_vm * vm,uint32_t vcpuid)1221*4882a593Smuzhiyun int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
1222*4882a593Smuzhiyun {
1223*4882a593Smuzhiyun struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1224*4882a593Smuzhiyun int rc;
1225*4882a593Smuzhiyun
1226*4882a593Smuzhiyun TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1227*4882a593Smuzhiyun do {
1228*4882a593Smuzhiyun rc = ioctl(vcpu->fd, KVM_RUN, NULL);
1229*4882a593Smuzhiyun } while (rc == -1 && errno == EINTR);
1230*4882a593Smuzhiyun
1231*4882a593Smuzhiyun assert_on_unhandled_exception(vm, vcpuid);
1232*4882a593Smuzhiyun
1233*4882a593Smuzhiyun return rc;
1234*4882a593Smuzhiyun }
1235*4882a593Smuzhiyun
vcpu_run_complete_io(struct kvm_vm * vm,uint32_t vcpuid)1236*4882a593Smuzhiyun void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
1237*4882a593Smuzhiyun {
1238*4882a593Smuzhiyun struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1239*4882a593Smuzhiyun int ret;
1240*4882a593Smuzhiyun
1241*4882a593Smuzhiyun TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1242*4882a593Smuzhiyun
1243*4882a593Smuzhiyun vcpu->state->immediate_exit = 1;
1244*4882a593Smuzhiyun ret = ioctl(vcpu->fd, KVM_RUN, NULL);
1245*4882a593Smuzhiyun vcpu->state->immediate_exit = 0;
1246*4882a593Smuzhiyun
1247*4882a593Smuzhiyun TEST_ASSERT(ret == -1 && errno == EINTR,
1248*4882a593Smuzhiyun "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
1249*4882a593Smuzhiyun ret, errno);
1250*4882a593Smuzhiyun }
1251*4882a593Smuzhiyun
vcpu_set_guest_debug(struct kvm_vm * vm,uint32_t vcpuid,struct kvm_guest_debug * debug)1252*4882a593Smuzhiyun void vcpu_set_guest_debug(struct kvm_vm *vm, uint32_t vcpuid,
1253*4882a593Smuzhiyun struct kvm_guest_debug *debug)
1254*4882a593Smuzhiyun {
1255*4882a593Smuzhiyun struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1256*4882a593Smuzhiyun int ret = ioctl(vcpu->fd, KVM_SET_GUEST_DEBUG, debug);
1257*4882a593Smuzhiyun
1258*4882a593Smuzhiyun TEST_ASSERT(ret == 0, "KVM_SET_GUEST_DEBUG failed: %d", ret);
1259*4882a593Smuzhiyun }
1260*4882a593Smuzhiyun
1261*4882a593Smuzhiyun /*
1262*4882a593Smuzhiyun * VM VCPU Set MP State
1263*4882a593Smuzhiyun *
1264*4882a593Smuzhiyun * Input Args:
1265*4882a593Smuzhiyun * vm - Virtual Machine
1266*4882a593Smuzhiyun * vcpuid - VCPU ID
1267*4882a593Smuzhiyun * mp_state - mp_state to be set
1268*4882a593Smuzhiyun *
1269*4882a593Smuzhiyun * Output Args: None
1270*4882a593Smuzhiyun *
1271*4882a593Smuzhiyun * Return: None
1272*4882a593Smuzhiyun *
1273*4882a593Smuzhiyun * Sets the MP state of the VCPU given by vcpuid, to the state given
1274*4882a593Smuzhiyun * by mp_state.
1275*4882a593Smuzhiyun */
vcpu_set_mp_state(struct kvm_vm * vm,uint32_t vcpuid,struct kvm_mp_state * mp_state)1276*4882a593Smuzhiyun void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
1277*4882a593Smuzhiyun struct kvm_mp_state *mp_state)
1278*4882a593Smuzhiyun {
1279*4882a593Smuzhiyun struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1280*4882a593Smuzhiyun int ret;
1281*4882a593Smuzhiyun
1282*4882a593Smuzhiyun TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1283*4882a593Smuzhiyun
1284*4882a593Smuzhiyun ret = ioctl(vcpu->fd, KVM_SET_MP_STATE, mp_state);
1285*4882a593Smuzhiyun TEST_ASSERT(ret == 0, "KVM_SET_MP_STATE IOCTL failed, "
1286*4882a593Smuzhiyun "rc: %i errno: %i", ret, errno);
1287*4882a593Smuzhiyun }
1288*4882a593Smuzhiyun
1289*4882a593Smuzhiyun /*
1290*4882a593Smuzhiyun * VM VCPU Get Reg List
1291*4882a593Smuzhiyun *
1292*4882a593Smuzhiyun * Input Args:
1293*4882a593Smuzhiyun * vm - Virtual Machine
1294*4882a593Smuzhiyun * vcpuid - VCPU ID
1295*4882a593Smuzhiyun *
1296*4882a593Smuzhiyun * Output Args:
1297*4882a593Smuzhiyun * None
1298*4882a593Smuzhiyun *
1299*4882a593Smuzhiyun * Return:
1300*4882a593Smuzhiyun * A pointer to an allocated struct kvm_reg_list
1301*4882a593Smuzhiyun *
1302*4882a593Smuzhiyun * Get the list of guest registers which are supported for
1303*4882a593Smuzhiyun * KVM_GET_ONE_REG/KVM_SET_ONE_REG calls
1304*4882a593Smuzhiyun */
vcpu_get_reg_list(struct kvm_vm * vm,uint32_t vcpuid)1305*4882a593Smuzhiyun struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid)
1306*4882a593Smuzhiyun {
1307*4882a593Smuzhiyun struct kvm_reg_list reg_list_n = { .n = 0 }, *reg_list;
1308*4882a593Smuzhiyun int ret;
1309*4882a593Smuzhiyun
1310*4882a593Smuzhiyun ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, ®_list_n);
1311*4882a593Smuzhiyun TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0");
1312*4882a593Smuzhiyun reg_list = calloc(1, sizeof(*reg_list) + reg_list_n.n * sizeof(__u64));
1313*4882a593Smuzhiyun reg_list->n = reg_list_n.n;
1314*4882a593Smuzhiyun vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, reg_list);
1315*4882a593Smuzhiyun return reg_list;
1316*4882a593Smuzhiyun }
1317*4882a593Smuzhiyun
1318*4882a593Smuzhiyun /*
1319*4882a593Smuzhiyun * VM VCPU Regs Get
1320*4882a593Smuzhiyun *
1321*4882a593Smuzhiyun * Input Args:
1322*4882a593Smuzhiyun * vm - Virtual Machine
1323*4882a593Smuzhiyun * vcpuid - VCPU ID
1324*4882a593Smuzhiyun *
1325*4882a593Smuzhiyun * Output Args:
1326*4882a593Smuzhiyun * regs - current state of VCPU regs
1327*4882a593Smuzhiyun *
1328*4882a593Smuzhiyun * Return: None
1329*4882a593Smuzhiyun *
1330*4882a593Smuzhiyun * Obtains the current register state for the VCPU specified by vcpuid
1331*4882a593Smuzhiyun * and stores it at the location given by regs.
1332*4882a593Smuzhiyun */
vcpu_regs_get(struct kvm_vm * vm,uint32_t vcpuid,struct kvm_regs * regs)1333*4882a593Smuzhiyun void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
1334*4882a593Smuzhiyun {
1335*4882a593Smuzhiyun struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1336*4882a593Smuzhiyun int ret;
1337*4882a593Smuzhiyun
1338*4882a593Smuzhiyun TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1339*4882a593Smuzhiyun
1340*4882a593Smuzhiyun ret = ioctl(vcpu->fd, KVM_GET_REGS, regs);
1341*4882a593Smuzhiyun TEST_ASSERT(ret == 0, "KVM_GET_REGS failed, rc: %i errno: %i",
1342*4882a593Smuzhiyun ret, errno);
1343*4882a593Smuzhiyun }
1344*4882a593Smuzhiyun
1345*4882a593Smuzhiyun /*
1346*4882a593Smuzhiyun * VM VCPU Regs Set
1347*4882a593Smuzhiyun *
1348*4882a593Smuzhiyun * Input Args:
1349*4882a593Smuzhiyun * vm - Virtual Machine
1350*4882a593Smuzhiyun * vcpuid - VCPU ID
1351*4882a593Smuzhiyun * regs - Values to set VCPU regs to
1352*4882a593Smuzhiyun *
1353*4882a593Smuzhiyun * Output Args: None
1354*4882a593Smuzhiyun *
1355*4882a593Smuzhiyun * Return: None
1356*4882a593Smuzhiyun *
1357*4882a593Smuzhiyun * Sets the regs of the VCPU specified by vcpuid to the values
1358*4882a593Smuzhiyun * given by regs.
1359*4882a593Smuzhiyun */
vcpu_regs_set(struct kvm_vm * vm,uint32_t vcpuid,struct kvm_regs * regs)1360*4882a593Smuzhiyun void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
1361*4882a593Smuzhiyun {
1362*4882a593Smuzhiyun struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1363*4882a593Smuzhiyun int ret;
1364*4882a593Smuzhiyun
1365*4882a593Smuzhiyun TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1366*4882a593Smuzhiyun
1367*4882a593Smuzhiyun ret = ioctl(vcpu->fd, KVM_SET_REGS, regs);
1368*4882a593Smuzhiyun TEST_ASSERT(ret == 0, "KVM_SET_REGS failed, rc: %i errno: %i",
1369*4882a593Smuzhiyun ret, errno);
1370*4882a593Smuzhiyun }
1371*4882a593Smuzhiyun
1372*4882a593Smuzhiyun #ifdef __KVM_HAVE_VCPU_EVENTS
vcpu_events_get(struct kvm_vm * vm,uint32_t vcpuid,struct kvm_vcpu_events * events)1373*4882a593Smuzhiyun void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
1374*4882a593Smuzhiyun struct kvm_vcpu_events *events)
1375*4882a593Smuzhiyun {
1376*4882a593Smuzhiyun struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1377*4882a593Smuzhiyun int ret;
1378*4882a593Smuzhiyun
1379*4882a593Smuzhiyun TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1380*4882a593Smuzhiyun
1381*4882a593Smuzhiyun ret = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, events);
1382*4882a593Smuzhiyun TEST_ASSERT(ret == 0, "KVM_GET_VCPU_EVENTS, failed, rc: %i errno: %i",
1383*4882a593Smuzhiyun ret, errno);
1384*4882a593Smuzhiyun }
1385*4882a593Smuzhiyun
vcpu_events_set(struct kvm_vm * vm,uint32_t vcpuid,struct kvm_vcpu_events * events)1386*4882a593Smuzhiyun void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
1387*4882a593Smuzhiyun struct kvm_vcpu_events *events)
1388*4882a593Smuzhiyun {
1389*4882a593Smuzhiyun struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1390*4882a593Smuzhiyun int ret;
1391*4882a593Smuzhiyun
1392*4882a593Smuzhiyun TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1393*4882a593Smuzhiyun
1394*4882a593Smuzhiyun ret = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, events);
1395*4882a593Smuzhiyun TEST_ASSERT(ret == 0, "KVM_SET_VCPU_EVENTS, failed, rc: %i errno: %i",
1396*4882a593Smuzhiyun ret, errno);
1397*4882a593Smuzhiyun }
1398*4882a593Smuzhiyun #endif
1399*4882a593Smuzhiyun
1400*4882a593Smuzhiyun #ifdef __x86_64__
vcpu_nested_state_get(struct kvm_vm * vm,uint32_t vcpuid,struct kvm_nested_state * state)1401*4882a593Smuzhiyun void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid,
1402*4882a593Smuzhiyun struct kvm_nested_state *state)
1403*4882a593Smuzhiyun {
1404*4882a593Smuzhiyun struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1405*4882a593Smuzhiyun int ret;
1406*4882a593Smuzhiyun
1407*4882a593Smuzhiyun TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1408*4882a593Smuzhiyun
1409*4882a593Smuzhiyun ret = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, state);
1410*4882a593Smuzhiyun TEST_ASSERT(ret == 0,
1411*4882a593Smuzhiyun "KVM_SET_NESTED_STATE failed, ret: %i errno: %i",
1412*4882a593Smuzhiyun ret, errno);
1413*4882a593Smuzhiyun }
1414*4882a593Smuzhiyun
vcpu_nested_state_set(struct kvm_vm * vm,uint32_t vcpuid,struct kvm_nested_state * state,bool ignore_error)1415*4882a593Smuzhiyun int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
1416*4882a593Smuzhiyun struct kvm_nested_state *state, bool ignore_error)
1417*4882a593Smuzhiyun {
1418*4882a593Smuzhiyun struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1419*4882a593Smuzhiyun int ret;
1420*4882a593Smuzhiyun
1421*4882a593Smuzhiyun TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1422*4882a593Smuzhiyun
1423*4882a593Smuzhiyun ret = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, state);
1424*4882a593Smuzhiyun if (!ignore_error) {
1425*4882a593Smuzhiyun TEST_ASSERT(ret == 0,
1426*4882a593Smuzhiyun "KVM_SET_NESTED_STATE failed, ret: %i errno: %i",
1427*4882a593Smuzhiyun ret, errno);
1428*4882a593Smuzhiyun }
1429*4882a593Smuzhiyun
1430*4882a593Smuzhiyun return ret;
1431*4882a593Smuzhiyun }
1432*4882a593Smuzhiyun #endif
1433*4882a593Smuzhiyun
1434*4882a593Smuzhiyun /*
1435*4882a593Smuzhiyun * VM VCPU System Regs Get
1436*4882a593Smuzhiyun *
1437*4882a593Smuzhiyun * Input Args:
1438*4882a593Smuzhiyun * vm - Virtual Machine
1439*4882a593Smuzhiyun * vcpuid - VCPU ID
1440*4882a593Smuzhiyun *
1441*4882a593Smuzhiyun * Output Args:
1442*4882a593Smuzhiyun * sregs - current state of VCPU system regs
1443*4882a593Smuzhiyun *
1444*4882a593Smuzhiyun * Return: None
1445*4882a593Smuzhiyun *
1446*4882a593Smuzhiyun * Obtains the current system register state for the VCPU specified by
1447*4882a593Smuzhiyun * vcpuid and stores it at the location given by sregs.
1448*4882a593Smuzhiyun */
vcpu_sregs_get(struct kvm_vm * vm,uint32_t vcpuid,struct kvm_sregs * sregs)1449*4882a593Smuzhiyun void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
1450*4882a593Smuzhiyun {
1451*4882a593Smuzhiyun struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1452*4882a593Smuzhiyun int ret;
1453*4882a593Smuzhiyun
1454*4882a593Smuzhiyun TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1455*4882a593Smuzhiyun
1456*4882a593Smuzhiyun ret = ioctl(vcpu->fd, KVM_GET_SREGS, sregs);
1457*4882a593Smuzhiyun TEST_ASSERT(ret == 0, "KVM_GET_SREGS failed, rc: %i errno: %i",
1458*4882a593Smuzhiyun ret, errno);
1459*4882a593Smuzhiyun }
1460*4882a593Smuzhiyun
1461*4882a593Smuzhiyun /*
1462*4882a593Smuzhiyun * VM VCPU System Regs Set
1463*4882a593Smuzhiyun *
1464*4882a593Smuzhiyun * Input Args:
1465*4882a593Smuzhiyun * vm - Virtual Machine
1466*4882a593Smuzhiyun * vcpuid - VCPU ID
1467*4882a593Smuzhiyun * sregs - Values to set VCPU system regs to
1468*4882a593Smuzhiyun *
1469*4882a593Smuzhiyun * Output Args: None
1470*4882a593Smuzhiyun *
1471*4882a593Smuzhiyun * Return: None
1472*4882a593Smuzhiyun *
1473*4882a593Smuzhiyun * Sets the system regs of the VCPU specified by vcpuid to the values
1474*4882a593Smuzhiyun * given by sregs.
1475*4882a593Smuzhiyun */
void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
	int ret = _vcpu_sregs_set(vm, vcpuid, sregs);

	/* Fix: the failure message previously claimed KVM_RUN failed. */
	TEST_ASSERT(ret == 0, "KVM_SET_SREGS IOCTL failed, "
		"rc: %i errno: %i", ret, errno);
}
1482*4882a593Smuzhiyun
_vcpu_sregs_set(struct kvm_vm * vm,uint32_t vcpuid,struct kvm_sregs * sregs)1483*4882a593Smuzhiyun int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
1484*4882a593Smuzhiyun {
1485*4882a593Smuzhiyun struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1486*4882a593Smuzhiyun
1487*4882a593Smuzhiyun TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1488*4882a593Smuzhiyun
1489*4882a593Smuzhiyun return ioctl(vcpu->fd, KVM_SET_SREGS, sregs);
1490*4882a593Smuzhiyun }
1491*4882a593Smuzhiyun
vcpu_fpu_get(struct kvm_vm * vm,uint32_t vcpuid,struct kvm_fpu * fpu)1492*4882a593Smuzhiyun void vcpu_fpu_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_fpu *fpu)
1493*4882a593Smuzhiyun {
1494*4882a593Smuzhiyun int ret;
1495*4882a593Smuzhiyun
1496*4882a593Smuzhiyun ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_FPU, fpu);
1497*4882a593Smuzhiyun TEST_ASSERT(ret == 0, "KVM_GET_FPU failed, rc: %i errno: %i (%s)",
1498*4882a593Smuzhiyun ret, errno, strerror(errno));
1499*4882a593Smuzhiyun }
1500*4882a593Smuzhiyun
vcpu_fpu_set(struct kvm_vm * vm,uint32_t vcpuid,struct kvm_fpu * fpu)1501*4882a593Smuzhiyun void vcpu_fpu_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_fpu *fpu)
1502*4882a593Smuzhiyun {
1503*4882a593Smuzhiyun int ret;
1504*4882a593Smuzhiyun
1505*4882a593Smuzhiyun ret = _vcpu_ioctl(vm, vcpuid, KVM_SET_FPU, fpu);
1506*4882a593Smuzhiyun TEST_ASSERT(ret == 0, "KVM_SET_FPU failed, rc: %i errno: %i (%s)",
1507*4882a593Smuzhiyun ret, errno, strerror(errno));
1508*4882a593Smuzhiyun }
1509*4882a593Smuzhiyun
vcpu_get_reg(struct kvm_vm * vm,uint32_t vcpuid,struct kvm_one_reg * reg)1510*4882a593Smuzhiyun void vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg)
1511*4882a593Smuzhiyun {
1512*4882a593Smuzhiyun int ret;
1513*4882a593Smuzhiyun
1514*4882a593Smuzhiyun ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, reg);
1515*4882a593Smuzhiyun TEST_ASSERT(ret == 0, "KVM_GET_ONE_REG failed, rc: %i errno: %i (%s)",
1516*4882a593Smuzhiyun ret, errno, strerror(errno));
1517*4882a593Smuzhiyun }
1518*4882a593Smuzhiyun
vcpu_set_reg(struct kvm_vm * vm,uint32_t vcpuid,struct kvm_one_reg * reg)1519*4882a593Smuzhiyun void vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg)
1520*4882a593Smuzhiyun {
1521*4882a593Smuzhiyun int ret;
1522*4882a593Smuzhiyun
1523*4882a593Smuzhiyun ret = _vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, reg);
1524*4882a593Smuzhiyun TEST_ASSERT(ret == 0, "KVM_SET_ONE_REG failed, rc: %i errno: %i (%s)",
1525*4882a593Smuzhiyun ret, errno, strerror(errno));
1526*4882a593Smuzhiyun }
1527*4882a593Smuzhiyun
1528*4882a593Smuzhiyun /*
1529*4882a593Smuzhiyun * VCPU Ioctl
1530*4882a593Smuzhiyun *
1531*4882a593Smuzhiyun * Input Args:
1532*4882a593Smuzhiyun * vm - Virtual Machine
1533*4882a593Smuzhiyun * vcpuid - VCPU ID
1534*4882a593Smuzhiyun * cmd - Ioctl number
1535*4882a593Smuzhiyun * arg - Argument to pass to the ioctl
1536*4882a593Smuzhiyun *
1537*4882a593Smuzhiyun * Return: None
1538*4882a593Smuzhiyun *
1539*4882a593Smuzhiyun * Issues an arbitrary ioctl on a VCPU fd.
1540*4882a593Smuzhiyun */
void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
		unsigned long cmd, void *arg)
{
	/* Asserting wrapper around _vcpu_ioctl(). */
	int rc = _vcpu_ioctl(vm, vcpuid, cmd, arg);

	TEST_ASSERT(rc == 0, "vcpu ioctl %lu failed, rc: %i errno: %i (%s)",
		    cmd, rc, errno, strerror(errno));
}
1550*4882a593Smuzhiyun
_vcpu_ioctl(struct kvm_vm * vm,uint32_t vcpuid,unsigned long cmd,void * arg)1551*4882a593Smuzhiyun int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
1552*4882a593Smuzhiyun unsigned long cmd, void *arg)
1553*4882a593Smuzhiyun {
1554*4882a593Smuzhiyun struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1555*4882a593Smuzhiyun int ret;
1556*4882a593Smuzhiyun
1557*4882a593Smuzhiyun TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1558*4882a593Smuzhiyun
1559*4882a593Smuzhiyun ret = ioctl(vcpu->fd, cmd, arg);
1560*4882a593Smuzhiyun
1561*4882a593Smuzhiyun return ret;
1562*4882a593Smuzhiyun }
1563*4882a593Smuzhiyun
1564*4882a593Smuzhiyun /*
1565*4882a593Smuzhiyun * VM Ioctl
1566*4882a593Smuzhiyun *
1567*4882a593Smuzhiyun * Input Args:
1568*4882a593Smuzhiyun * vm - Virtual Machine
1569*4882a593Smuzhiyun * cmd - Ioctl number
1570*4882a593Smuzhiyun * arg - Argument to pass to the ioctl
1571*4882a593Smuzhiyun *
1572*4882a593Smuzhiyun * Return: None
1573*4882a593Smuzhiyun *
1574*4882a593Smuzhiyun * Issues an arbitrary ioctl on a VM fd.
1575*4882a593Smuzhiyun */
vm_ioctl(struct kvm_vm * vm,unsigned long cmd,void * arg)1576*4882a593Smuzhiyun void vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
1577*4882a593Smuzhiyun {
1578*4882a593Smuzhiyun int ret;
1579*4882a593Smuzhiyun
1580*4882a593Smuzhiyun ret = ioctl(vm->fd, cmd, arg);
1581*4882a593Smuzhiyun TEST_ASSERT(ret == 0, "vm ioctl %lu failed, rc: %i errno: %i (%s)",
1582*4882a593Smuzhiyun cmd, ret, errno, strerror(errno));
1583*4882a593Smuzhiyun }
1584*4882a593Smuzhiyun
1585*4882a593Smuzhiyun /*
1586*4882a593Smuzhiyun * VM Dump
1587*4882a593Smuzhiyun *
1588*4882a593Smuzhiyun * Input Args:
1589*4882a593Smuzhiyun * vm - Virtual Machine
1590*4882a593Smuzhiyun * indent - Left margin indent amount
1591*4882a593Smuzhiyun *
1592*4882a593Smuzhiyun * Output Args:
1593*4882a593Smuzhiyun * stream - Output FILE stream
1594*4882a593Smuzhiyun *
1595*4882a593Smuzhiyun * Return: None
1596*4882a593Smuzhiyun *
1597*4882a593Smuzhiyun * Dumps the current state of the VM given by vm, to the FILE stream
1598*4882a593Smuzhiyun * given by stream.
1599*4882a593Smuzhiyun */
void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	struct userspace_mem_region *region;
	struct vcpu *vcpu;

	/* Basic VM attributes. */
	fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode);
	fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
	fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
	/* One entry per userspace memory region, plus its free-page bitmap. */
	fprintf(stream, "%*sMem Regions:\n", indent, "");
	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
		fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
			"host_virt: %p\n", indent + 2, "",
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size,
			region->host_mem);
		fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
		sparsebit_dump(stream, region->unused_phy_pages, 0);
	}
	fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
	sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
	fprintf(stream, "%*spgd_created: %u\n", indent, "",
		vm->pgd_created);
	/* Page tables only exist once the PGD has been created. */
	if (vm->pgd_created) {
		fprintf(stream, "%*sVirtual Translation Tables:\n",
			indent + 2, "");
		virt_dump(stream, vm, indent + 4);
	}
	/* Arch-specific per-vCPU dump for every vCPU on the VM. */
	fprintf(stream, "%*sVCPUs:\n", indent, "");
	list_for_each_entry(vcpu, &vm->vcpus, list)
		vcpu_dump(stream, vm, vcpu->id, indent + 2);
}
1631*4882a593Smuzhiyun
/* Known KVM exit reasons, mapping KVM_EXIT_* values to printable names. */
static struct exit_reason {
	unsigned int reason;	/* KVM_EXIT_* value from the KVM UAPI */
	const char *name;	/* human-readable name for that value */
} exit_reasons_known[] = {
	{KVM_EXIT_UNKNOWN, "UNKNOWN"},
	{KVM_EXIT_EXCEPTION, "EXCEPTION"},
	{KVM_EXIT_IO, "IO"},
	{KVM_EXIT_HYPERCALL, "HYPERCALL"},
	{KVM_EXIT_DEBUG, "DEBUG"},
	{KVM_EXIT_HLT, "HLT"},
	{KVM_EXIT_MMIO, "MMIO"},
	{KVM_EXIT_IRQ_WINDOW_OPEN, "IRQ_WINDOW_OPEN"},
	{KVM_EXIT_SHUTDOWN, "SHUTDOWN"},
	{KVM_EXIT_FAIL_ENTRY, "FAIL_ENTRY"},
	{KVM_EXIT_INTR, "INTR"},
	{KVM_EXIT_SET_TPR, "SET_TPR"},
	{KVM_EXIT_TPR_ACCESS, "TPR_ACCESS"},
	{KVM_EXIT_S390_SIEIC, "S390_SIEIC"},
	{KVM_EXIT_S390_RESET, "S390_RESET"},
	{KVM_EXIT_DCR, "DCR"},
	{KVM_EXIT_NMI, "NMI"},
	{KVM_EXIT_INTERNAL_ERROR, "INTERNAL_ERROR"},
	{KVM_EXIT_OSI, "OSI"},
	{KVM_EXIT_PAPR_HCALL, "PAPR_HCALL"},
/* Only present in newer kernel headers; compiled in conditionally. */
#ifdef KVM_EXIT_MEMORY_NOT_PRESENT
	{KVM_EXIT_MEMORY_NOT_PRESENT, "MEMORY_NOT_PRESENT"},
#endif
};
1661*4882a593Smuzhiyun
1662*4882a593Smuzhiyun /*
1663*4882a593Smuzhiyun * Exit Reason String
1664*4882a593Smuzhiyun *
1665*4882a593Smuzhiyun * Input Args:
1666*4882a593Smuzhiyun * exit_reason - Exit reason
1667*4882a593Smuzhiyun *
1668*4882a593Smuzhiyun * Output Args: None
1669*4882a593Smuzhiyun *
1670*4882a593Smuzhiyun * Return:
1671*4882a593Smuzhiyun * Constant string pointer describing the exit reason.
1672*4882a593Smuzhiyun *
1673*4882a593Smuzhiyun * Locates and returns a constant string that describes the KVM exit
1674*4882a593Smuzhiyun * reason given by exit_reason. If no such string is found, a constant
1675*4882a593Smuzhiyun * string of "Unknown" is returned.
1676*4882a593Smuzhiyun */
exit_reason_str(unsigned int exit_reason)1677*4882a593Smuzhiyun const char *exit_reason_str(unsigned int exit_reason)
1678*4882a593Smuzhiyun {
1679*4882a593Smuzhiyun unsigned int n1;
1680*4882a593Smuzhiyun
1681*4882a593Smuzhiyun for (n1 = 0; n1 < ARRAY_SIZE(exit_reasons_known); n1++) {
1682*4882a593Smuzhiyun if (exit_reason == exit_reasons_known[n1].reason)
1683*4882a593Smuzhiyun return exit_reasons_known[n1].name;
1684*4882a593Smuzhiyun }
1685*4882a593Smuzhiyun
1686*4882a593Smuzhiyun return "Unknown";
1687*4882a593Smuzhiyun }
1688*4882a593Smuzhiyun
1689*4882a593Smuzhiyun /*
1690*4882a593Smuzhiyun * Physical Contiguous Page Allocator
1691*4882a593Smuzhiyun *
1692*4882a593Smuzhiyun * Input Args:
1693*4882a593Smuzhiyun * vm - Virtual Machine
1694*4882a593Smuzhiyun * num - number of pages
1695*4882a593Smuzhiyun * paddr_min - Physical address minimum
1696*4882a593Smuzhiyun * memslot - Memory region to allocate page from
1697*4882a593Smuzhiyun *
1698*4882a593Smuzhiyun * Output Args: None
1699*4882a593Smuzhiyun *
1700*4882a593Smuzhiyun * Return:
1701*4882a593Smuzhiyun * Starting physical address
1702*4882a593Smuzhiyun *
1703*4882a593Smuzhiyun * Within the VM specified by vm, locates a range of available physical
1704*4882a593Smuzhiyun * pages at or above paddr_min. If found, the pages are marked as in use
1705*4882a593Smuzhiyun * and their base address is returned. A TEST_ASSERT failure occurs if
1706*4882a593Smuzhiyun * not enough pages are available at or above paddr_min.
1707*4882a593Smuzhiyun */
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
			      vm_paddr_t paddr_min, uint32_t memslot)
{
	struct userspace_mem_region *region;
	sparsebit_idx_t pg, base;

	TEST_ASSERT(num > 0, "Must allocate at least one page");

	TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
		"not divisible by page size.\n"
		"  paddr_min: 0x%lx page_size: 0x%x",
		paddr_min, vm->page_size);

	region = memslot2region(vm, memslot);
	/* Start scanning at the page frame containing paddr_min. */
	base = pg = paddr_min >> vm->page_shift;

	/*
	 * Look for 'num' consecutive free pages: a set bit in
	 * unused_phy_pages means the page is free.  On the first clear
	 * bit, restart the window at the next set bit.  sparsebit_next_set()
	 * yields 0 when no further set bit exists, which ends the loop
	 * with pg == 0 (exhaustion); pg == base + num means success.
	 */
	do {
		for (; pg < base + num; ++pg) {
			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
				base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
				break;
			}
		}
	} while (pg && pg != base + num);

	if (pg == 0) {
		fprintf(stderr, "No guest physical page available, "
			"paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
			paddr_min, vm->page_size, memslot);
		fputs("---- vm dump ----\n", stderr);
		vm_dump(stderr, vm, 2);
		abort();
	}

	/* Claim the range by clearing its bits in the free-page bitmap. */
	for (pg = base; pg < base + num; ++pg)
		sparsebit_clear(region->unused_phy_pages, pg);

	return base * vm->page_size;
}
1747*4882a593Smuzhiyun
/* Single-page convenience wrapper around vm_phy_pages_alloc(). */
vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
			     uint32_t memslot)
{
	return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
}
1753*4882a593Smuzhiyun
1754*4882a593Smuzhiyun /*
1755*4882a593Smuzhiyun * Address Guest Virtual to Host Virtual
1756*4882a593Smuzhiyun *
1757*4882a593Smuzhiyun * Input Args:
1758*4882a593Smuzhiyun * vm - Virtual Machine
1759*4882a593Smuzhiyun * gva - VM virtual address
1760*4882a593Smuzhiyun *
1761*4882a593Smuzhiyun * Output Args: None
1762*4882a593Smuzhiyun *
1763*4882a593Smuzhiyun * Return:
1764*4882a593Smuzhiyun * Equivalent host virtual address
1765*4882a593Smuzhiyun */
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
{
	/* Translate guest virtual -> guest physical -> host virtual. */
	vm_paddr_t gpa = addr_gva2gpa(vm, gva);

	return addr_gpa2hva(vm, gpa);
}
1770*4882a593Smuzhiyun
1771*4882a593Smuzhiyun /*
1772*4882a593Smuzhiyun * Is Unrestricted Guest
1773*4882a593Smuzhiyun *
1774*4882a593Smuzhiyun * Input Args:
1775*4882a593Smuzhiyun * vm - Virtual Machine
1776*4882a593Smuzhiyun *
1777*4882a593Smuzhiyun * Output Args: None
1778*4882a593Smuzhiyun *
1779*4882a593Smuzhiyun * Return: True if the unrestricted guest is set to 'Y', otherwise return false.
1780*4882a593Smuzhiyun *
1781*4882a593Smuzhiyun * Check if the unrestricted guest flag is enabled.
1782*4882a593Smuzhiyun */
vm_is_unrestricted_guest(struct kvm_vm * vm)1783*4882a593Smuzhiyun bool vm_is_unrestricted_guest(struct kvm_vm *vm)
1784*4882a593Smuzhiyun {
1785*4882a593Smuzhiyun char val = 'N';
1786*4882a593Smuzhiyun size_t count;
1787*4882a593Smuzhiyun FILE *f;
1788*4882a593Smuzhiyun
1789*4882a593Smuzhiyun if (vm == NULL) {
1790*4882a593Smuzhiyun /* Ensure that the KVM vendor-specific module is loaded. */
1791*4882a593Smuzhiyun f = fopen(KVM_DEV_PATH, "r");
1792*4882a593Smuzhiyun TEST_ASSERT(f != NULL, "Error in opening KVM dev file: %d",
1793*4882a593Smuzhiyun errno);
1794*4882a593Smuzhiyun fclose(f);
1795*4882a593Smuzhiyun }
1796*4882a593Smuzhiyun
1797*4882a593Smuzhiyun f = fopen("/sys/module/kvm_intel/parameters/unrestricted_guest", "r");
1798*4882a593Smuzhiyun if (f) {
1799*4882a593Smuzhiyun count = fread(&val, sizeof(char), 1, f);
1800*4882a593Smuzhiyun TEST_ASSERT(count == 1, "Unable to read from param file.");
1801*4882a593Smuzhiyun fclose(f);
1802*4882a593Smuzhiyun }
1803*4882a593Smuzhiyun
1804*4882a593Smuzhiyun return val == 'Y';
1805*4882a593Smuzhiyun }
1806*4882a593Smuzhiyun
vm_get_page_size(struct kvm_vm * vm)1807*4882a593Smuzhiyun unsigned int vm_get_page_size(struct kvm_vm *vm)
1808*4882a593Smuzhiyun {
1809*4882a593Smuzhiyun return vm->page_size;
1810*4882a593Smuzhiyun }
1811*4882a593Smuzhiyun
vm_get_page_shift(struct kvm_vm * vm)1812*4882a593Smuzhiyun unsigned int vm_get_page_shift(struct kvm_vm *vm)
1813*4882a593Smuzhiyun {
1814*4882a593Smuzhiyun return vm->page_shift;
1815*4882a593Smuzhiyun }
1816*4882a593Smuzhiyun
vm_get_max_gfn(struct kvm_vm * vm)1817*4882a593Smuzhiyun unsigned int vm_get_max_gfn(struct kvm_vm *vm)
1818*4882a593Smuzhiyun {
1819*4882a593Smuzhiyun return vm->max_gfn;
1820*4882a593Smuzhiyun }
1821*4882a593Smuzhiyun
vm_get_fd(struct kvm_vm * vm)1822*4882a593Smuzhiyun int vm_get_fd(struct kvm_vm *vm)
1823*4882a593Smuzhiyun {
1824*4882a593Smuzhiyun return vm->fd;
1825*4882a593Smuzhiyun }
1826*4882a593Smuzhiyun
/*
 * Convert a page count between two page sizes given by their shifts.
 * When shrinking the page size the count is multiplied; when growing
 * it is divided, rounding up iff 'ceil' is set.
 */
static unsigned int vm_calc_num_pages(unsigned int num_pages,
				      unsigned int page_shift,
				      unsigned int new_page_shift,
				      bool ceil)
{
	unsigned int n;

	if (page_shift >= new_page_shift)
		return num_pages * (1 << (page_shift - new_page_shift));

	/*
	 * Fix: compute the ratio only on this path.  The original computed
	 * it unconditionally, left-shifting by a negative amount (undefined
	 * behavior, C11 6.5.7) whenever page_shift > new_page_shift.
	 */
	n = 1 << (new_page_shift - page_shift);
	return num_pages / n + !!(ceil && num_pages % n);
}
1839*4882a593Smuzhiyun
/* Host page shift: log2 of the host page size (always a power of two). */
static inline int getpageshift(void)
{
	return __builtin_ctz(getpagesize());
}
1844*4882a593Smuzhiyun
1845*4882a593Smuzhiyun unsigned int
vm_num_host_pages(enum vm_guest_mode mode,unsigned int num_guest_pages)1846*4882a593Smuzhiyun vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
1847*4882a593Smuzhiyun {
1848*4882a593Smuzhiyun return vm_calc_num_pages(num_guest_pages,
1849*4882a593Smuzhiyun vm_guest_mode_params[mode].page_shift,
1850*4882a593Smuzhiyun getpageshift(), true);
1851*4882a593Smuzhiyun }
1852*4882a593Smuzhiyun
1853*4882a593Smuzhiyun unsigned int
vm_num_guest_pages(enum vm_guest_mode mode,unsigned int num_host_pages)1854*4882a593Smuzhiyun vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages)
1855*4882a593Smuzhiyun {
1856*4882a593Smuzhiyun return vm_calc_num_pages(num_host_pages, getpageshift(),
1857*4882a593Smuzhiyun vm_guest_mode_params[mode].page_shift, false);
1858*4882a593Smuzhiyun }
1859*4882a593Smuzhiyun
vm_calc_num_guest_pages(enum vm_guest_mode mode,size_t size)1860*4882a593Smuzhiyun unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size)
1861*4882a593Smuzhiyun {
1862*4882a593Smuzhiyun unsigned int n;
1863*4882a593Smuzhiyun n = DIV_ROUND_UP(size, vm_guest_mode_params[mode].page_size);
1864*4882a593Smuzhiyun return vm_adjust_num_guest_pages(mode, n);
1865*4882a593Smuzhiyun }
1866