Lines matching refs: kvm (identifier cross-reference; all hits are in virt/kvm/kvm_main.c)
153 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
157 __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, in kvm_arch_mmu_notifier_invalidate_range() argument
162 __weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) in kvm_arch_guest_memory_reclaimed() argument
263 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, in kvm_make_vcpus_request_mask() argument
273 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_make_vcpus_request_mask()
295 bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, in kvm_make_all_cpus_request_except() argument
303 called = kvm_make_vcpus_request_mask(kvm, req, except, NULL, cpus); in kvm_make_all_cpus_request_except()
309 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) in kvm_make_all_cpus_request() argument
311 return kvm_make_all_cpus_request_except(kvm, req, NULL); in kvm_make_all_cpus_request()
315 void kvm_flush_remote_tlbs(struct kvm *kvm) in kvm_flush_remote_tlbs() argument
321 long dirty_count = smp_load_acquire(&kvm->tlbs_dirty); in kvm_flush_remote_tlbs()
334 if (!kvm_arch_flush_remote_tlb(kvm) in kvm_flush_remote_tlbs()
335 || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) in kvm_flush_remote_tlbs()
336 ++kvm->stat.remote_tlb_flush; in kvm_flush_remote_tlbs()
337 cmpxchg(&kvm->tlbs_dirty, dirty_count, 0); in kvm_flush_remote_tlbs()
342 void kvm_reload_remote_mmus(struct kvm *kvm) in kvm_reload_remote_mmus() argument
344 kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD); in kvm_reload_remote_mmus()
347 static void kvm_flush_shadow_all(struct kvm *kvm) in kvm_flush_shadow_all() argument
349 kvm_arch_flush_shadow_all(kvm); in kvm_flush_shadow_all()
350 kvm_arch_guest_memory_reclaimed(kvm); in kvm_flush_shadow_all()
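
The kvm_make_*_request() entries above are KVM's cross-vCPU request mechanism: a request bit is set on each vCPU and the vCPUs currently in guest mode are kicked, and the arch run loop later consumes the bit with kvm_check_request() before re-entering the guest. A hedged sketch of both halves follows; my_post_tlb_flush(), my_handle_requests() and my_local_tlb_flush() are invented names, only the kvm_*() calls are real API.

#include <linux/kvm_host.h>

/* Invented stand-in for an arch-specific local TLB flush. */
static void my_local_tlb_flush(struct kvm_vcpu *vcpu)
{
        /* e.g. flush the hardware TLB tagged with this vCPU's ASID/VPID */
}

/* Producer: set KVM_REQ_TLB_FLUSH on every vCPU and kick those in guest mode. */
static void my_post_tlb_flush(struct kvm *kvm)
{
        kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH);
}

/* Consumer, as it would sit in an arch run loop before guest entry. */
static void my_handle_requests(struct kvm_vcpu *vcpu)
{
        /* kvm_check_request() is a test-and-clear: true once per posted request. */
        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                my_local_tlb_flush(vcpu);
}
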
408 static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) in kvm_vcpu_init() argument
412 vcpu->kvm = kvm; in kvm_vcpu_init()
445 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) in mmu_notifier_to_kvm()
447 return container_of(mn, struct kvm, mmu_notifier); in mmu_notifier_to_kvm()
454 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_invalidate_range() local
457 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_invalidate_range()
458 kvm_arch_mmu_notifier_invalidate_range(kvm, start, end); in kvm_mmu_notifier_invalidate_range()
459 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_invalidate_range()
467 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_change_pte() local
470 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_change_pte()
471 spin_lock(&kvm->mmu_lock); in kvm_mmu_notifier_change_pte()
472 kvm->mmu_notifier_seq++; in kvm_mmu_notifier_change_pte()
474 if (kvm_set_spte_hva(kvm, address, pte)) in kvm_mmu_notifier_change_pte()
475 kvm_flush_remote_tlbs(kvm); in kvm_mmu_notifier_change_pte()
477 spin_unlock(&kvm->mmu_lock); in kvm_mmu_notifier_change_pte()
478 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_change_pte()
484 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_invalidate_range_start() local
487 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_invalidate_range_start()
488 spin_lock(&kvm->mmu_lock); in kvm_mmu_notifier_invalidate_range_start()
494 kvm->mmu_notifier_count++; in kvm_mmu_notifier_invalidate_range_start()
495 need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end, in kvm_mmu_notifier_invalidate_range_start()
498 if (need_tlb_flush || kvm->tlbs_dirty) in kvm_mmu_notifier_invalidate_range_start()
499 kvm_flush_remote_tlbs(kvm); in kvm_mmu_notifier_invalidate_range_start()
501 spin_unlock(&kvm->mmu_lock); in kvm_mmu_notifier_invalidate_range_start()
502 kvm_arch_guest_memory_reclaimed(kvm); in kvm_mmu_notifier_invalidate_range_start()
503 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_invalidate_range_start()
511 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_invalidate_range_end() local
513 spin_lock(&kvm->mmu_lock); in kvm_mmu_notifier_invalidate_range_end()
519 kvm->mmu_notifier_seq++; in kvm_mmu_notifier_invalidate_range_end()
526 kvm->mmu_notifier_count--; in kvm_mmu_notifier_invalidate_range_end()
527 spin_unlock(&kvm->mmu_lock); in kvm_mmu_notifier_invalidate_range_end()
529 BUG_ON(kvm->mmu_notifier_count < 0); in kvm_mmu_notifier_invalidate_range_end()
537 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_clear_flush_young() local
540 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_clear_flush_young()
541 spin_lock(&kvm->mmu_lock); in kvm_mmu_notifier_clear_flush_young()
543 young = kvm_age_hva(kvm, start, end); in kvm_mmu_notifier_clear_flush_young()
545 kvm_flush_remote_tlbs(kvm); in kvm_mmu_notifier_clear_flush_young()
547 spin_unlock(&kvm->mmu_lock); in kvm_mmu_notifier_clear_flush_young()
548 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_clear_flush_young()
558 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_clear_young() local
561 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_clear_young()
562 spin_lock(&kvm->mmu_lock); in kvm_mmu_notifier_clear_young()
576 young = kvm_age_hva(kvm, start, end); in kvm_mmu_notifier_clear_young()
577 spin_unlock(&kvm->mmu_lock); in kvm_mmu_notifier_clear_young()
578 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_clear_young()
587 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_test_young() local
590 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_test_young()
591 spin_lock(&kvm->mmu_lock); in kvm_mmu_notifier_test_young()
592 young = kvm_test_age_hva(kvm, address); in kvm_mmu_notifier_test_young()
593 spin_unlock(&kvm->mmu_lock); in kvm_mmu_notifier_test_young()
594 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_test_young()
602 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_release() local
605 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_release()
606 kvm_flush_shadow_all(kvm); in kvm_mmu_notifier_release()
607 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_release()
621 static int kvm_init_mmu_notifier(struct kvm *kvm) in kvm_init_mmu_notifier() argument
623 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; in kvm_init_mmu_notifier()
624 return mmu_notifier_register(&kvm->mmu_notifier, current->mm); in kvm_init_mmu_notifier()
629 static int kvm_init_mmu_notifier(struct kvm *kvm) in kvm_init_mmu_notifier() argument
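
The mmu_notifier_seq/mmu_notifier_count bookkeeping in the notifier callbacks above is consumed by arch page-fault handlers through a sample-then-recheck pattern built on mmu_notifier_retry(). A hedged sketch of that consumer side, loosely modeled on the x86 fault path; my_map_gfn() is invented and the error conventions are simplified.

#include <linux/kvm_host.h>

static int my_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long mmu_seq;
        kvm_pfn_t pfn;

        mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();                      /* pairs with the seq bump in the notifiers */

        pfn = gfn_to_pfn(kvm, gfn);     /* may sleep and fault the page in */
        if (is_error_noslot_pfn(pfn))
                return -EFAULT;

        spin_lock(&kvm->mmu_lock);
        if (mmu_notifier_retry(kvm, mmu_seq)) {
                /* An invalidation raced with us; drop the pfn and retry. */
                spin_unlock(&kvm->mmu_lock);
                kvm_release_pfn_clean(pfn);
                return -EAGAIN;
        }
        /* ... install the gfn -> pfn translation under mmu_lock ... */
        spin_unlock(&kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);

        return 0;
}
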
660 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) in kvm_free_memslot() argument
664 kvm_arch_free_memslot(kvm, slot); in kvm_free_memslot()
670 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots) in kvm_free_memslots() argument
678 kvm_free_memslot(kvm, memslot); in kvm_free_memslots()
683 static void kvm_destroy_vm_debugfs(struct kvm *kvm) in kvm_destroy_vm_debugfs() argument
687 if (!kvm->debugfs_dentry) in kvm_destroy_vm_debugfs()
690 debugfs_remove_recursive(kvm->debugfs_dentry); in kvm_destroy_vm_debugfs()
692 if (kvm->debugfs_stat_data) { in kvm_destroy_vm_debugfs()
694 kfree(kvm->debugfs_stat_data[i]); in kvm_destroy_vm_debugfs()
695 kfree(kvm->debugfs_stat_data); in kvm_destroy_vm_debugfs()
699 static int kvm_create_vm_debugfs(struct kvm *kvm, int fd) in kvm_create_vm_debugfs() argument
724 kvm->debugfs_dentry = dent; in kvm_create_vm_debugfs()
725 kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries, in kvm_create_vm_debugfs()
726 sizeof(*kvm->debugfs_stat_data), in kvm_create_vm_debugfs()
728 if (!kvm->debugfs_stat_data) in kvm_create_vm_debugfs()
736 stat_data->kvm = kvm; in kvm_create_vm_debugfs()
738 kvm->debugfs_stat_data[p - debugfs_entries] = stat_data; in kvm_create_vm_debugfs()
740 kvm->debugfs_dentry, stat_data, in kvm_create_vm_debugfs()
750 int __weak kvm_arch_post_init_vm(struct kvm *kvm) in kvm_arch_post_init_vm() argument
759 void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm) in kvm_arch_pre_destroy_vm() argument
763 static struct kvm *kvm_create_vm(unsigned long type) in kvm_create_vm()
765 struct kvm *kvm = kvm_arch_alloc_vm(); in kvm_create_vm() local
769 if (!kvm) in kvm_create_vm()
772 spin_lock_init(&kvm->mmu_lock); in kvm_create_vm()
774 kvm->mm = current->mm; in kvm_create_vm()
775 kvm_eventfd_init(kvm); in kvm_create_vm()
776 mutex_init(&kvm->lock); in kvm_create_vm()
777 mutex_init(&kvm->irq_lock); in kvm_create_vm()
778 mutex_init(&kvm->slots_lock); in kvm_create_vm()
779 INIT_LIST_HEAD(&kvm->devices); in kvm_create_vm()
783 if (init_srcu_struct(&kvm->srcu)) in kvm_create_vm()
785 if (init_srcu_struct(&kvm->irq_srcu)) in kvm_create_vm()
788 refcount_set(&kvm->users_count, 1); in kvm_create_vm()
796 rcu_assign_pointer(kvm->memslots[i], slots); in kvm_create_vm()
800 rcu_assign_pointer(kvm->buses[i], in kvm_create_vm()
802 if (!kvm->buses[i]) in kvm_create_vm()
806 kvm->max_halt_poll_ns = halt_poll_ns; in kvm_create_vm()
808 r = kvm_arch_init_vm(kvm, type); in kvm_create_vm()
817 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); in kvm_create_vm()
820 r = kvm_init_mmu_notifier(kvm); in kvm_create_vm()
824 r = kvm_arch_post_init_vm(kvm); in kvm_create_vm()
829 list_add(&kvm->vm_list, &vm_list); in kvm_create_vm()
844 return kvm; in kvm_create_vm()
848 if (kvm->mmu_notifier.ops) in kvm_create_vm()
849 mmu_notifier_unregister(&kvm->mmu_notifier, current->mm); in kvm_create_vm()
854 kvm_arch_destroy_vm(kvm); in kvm_create_vm()
856 WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count)); in kvm_create_vm()
858 kfree(kvm_get_bus(kvm, i)); in kvm_create_vm()
860 kvm_free_memslots(kvm, __kvm_memslots(kvm, i)); in kvm_create_vm()
861 cleanup_srcu_struct(&kvm->irq_srcu); in kvm_create_vm()
863 cleanup_srcu_struct(&kvm->srcu); in kvm_create_vm()
865 kvm_arch_free_vm(kvm); in kvm_create_vm()
870 static void kvm_destroy_devices(struct kvm *kvm) in kvm_destroy_devices() argument
879 list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) { in kvm_destroy_devices()
885 static void kvm_destroy_vm(struct kvm *kvm) in kvm_destroy_vm() argument
888 struct mm_struct *mm = kvm->mm; in kvm_destroy_vm()
890 kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm); in kvm_destroy_vm()
891 kvm_destroy_vm_debugfs(kvm); in kvm_destroy_vm()
892 kvm_arch_sync_events(kvm); in kvm_destroy_vm()
894 list_del(&kvm->vm_list); in kvm_destroy_vm()
896 kvm_arch_pre_destroy_vm(kvm); in kvm_destroy_vm()
898 kvm_free_irq_routing(kvm); in kvm_destroy_vm()
900 struct kvm_io_bus *bus = kvm_get_bus(kvm, i); in kvm_destroy_vm()
904 kvm->buses[i] = NULL; in kvm_destroy_vm()
906 kvm_coalesced_mmio_free(kvm); in kvm_destroy_vm()
908 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); in kvm_destroy_vm()
910 kvm_flush_shadow_all(kvm); in kvm_destroy_vm()
912 kvm_arch_destroy_vm(kvm); in kvm_destroy_vm()
913 kvm_destroy_devices(kvm); in kvm_destroy_vm()
915 kvm_free_memslots(kvm, __kvm_memslots(kvm, i)); in kvm_destroy_vm()
916 cleanup_srcu_struct(&kvm->irq_srcu); in kvm_destroy_vm()
917 cleanup_srcu_struct(&kvm->srcu); in kvm_destroy_vm()
918 kvm_arch_free_vm(kvm); in kvm_destroy_vm()
925 void kvm_get_kvm(struct kvm *kvm) in kvm_get_kvm() argument
927 refcount_inc(&kvm->users_count); in kvm_get_kvm()
931 void kvm_put_kvm(struct kvm *kvm) in kvm_put_kvm() argument
933 if (refcount_dec_and_test(&kvm->users_count)) in kvm_put_kvm()
934 kvm_destroy_vm(kvm); in kvm_put_kvm()
945 void kvm_put_kvm_no_destroy(struct kvm *kvm) in kvm_put_kvm_no_destroy() argument
947 WARN_ON(refcount_dec_and_test(&kvm->users_count)); in kvm_put_kvm_no_destroy()
953 struct kvm *kvm = filp->private_data; in kvm_vm_release() local
955 kvm_irqfd_release(kvm); in kvm_vm_release()
957 kvm_put_kvm(kvm); in kvm_vm_release()
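
kvm->users_count is the VM's lifetime anchor: anything that stores a kvm pointer takes a reference with kvm_get_kvm() first, and a failed publish drops it with kvm_put_kvm_no_destroy() because the caller still holds its own reference (the same shape as the vcpu and device creation paths further down). A hedged sketch; my_obj and my_publish() are invented, only the kvm_*() calls are real.

#include <linux/kvm_host.h>

struct my_obj {
        struct kvm *kvm;
};

static int my_publish(struct my_obj *obj);      /* invented step that may fail */

static int my_obj_create(struct kvm *kvm, struct my_obj *obj)
{
        obj->kvm = kvm;
        kvm_get_kvm(kvm);               /* pin the VM while obj holds the pointer */

        if (my_publish(obj) < 0) {
                /*
                 * Nobody else saw obj, and the caller still holds its own
                 * reference, so this put must not be the final one; hence
                 * the _no_destroy variant.
                 */
                kvm_put_kvm_no_destroy(kvm);
                return -EIO;
        }
        return 0;
}

static void my_obj_destroy(struct my_obj *obj)
{
        kvm_put_kvm(obj->kvm);          /* may tear the VM down on the last put */
}
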
1152 static struct kvm_memslots *install_new_memslots(struct kvm *kvm, in install_new_memslots() argument
1155 struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id); in install_new_memslots()
1161 rcu_assign_pointer(kvm->memslots[as_id], slots); in install_new_memslots()
1162 synchronize_srcu_expedited(&kvm->srcu); in install_new_memslots()
1181 kvm_arch_memslots_updated(kvm, gen); in install_new_memslots()
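
install_new_memslots() publishes a new slot array with rcu_assign_pointer() and waits out readers with synchronize_srcu_expedited(); every gfn lookup elsewhere in this file therefore has to run inside an SRCU read-side section on kvm->srcu. A hedged sketch of a correct reader; only my_gfn_to_hva() is invented.

#include <linux/kvm_host.h>

static unsigned long my_gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
        unsigned long hva;
        int idx;

        idx = srcu_read_lock(&kvm->srcu);
        /*
         * gfn_to_hva() walks kvm->memslots[] under SRCU; the array it sees
         * stays valid until srcu_read_unlock() even if install_new_memslots()
         * swaps in a replacement meanwhile.
         */
        hva = gfn_to_hva(kvm, gfn);
        srcu_read_unlock(&kvm->srcu, idx);

        return hva;                     /* check with kvm_is_error_hva() */
}
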
1214 static int kvm_set_memslot(struct kvm *kvm, in kvm_set_memslot() argument
1224 slots = kvm_dup_memslots(__kvm_memslots(kvm, as_id), change); in kvm_set_memslot()
1242 slots = install_new_memslots(kvm, as_id, slots); in kvm_set_memslot()
1251 kvm_arch_flush_shadow_memslot(kvm, slot); in kvm_set_memslot()
1252 kvm_arch_guest_memory_reclaimed(kvm); in kvm_set_memslot()
1255 r = kvm_arch_prepare_memory_region(kvm, new, mem, change); in kvm_set_memslot()
1260 slots = install_new_memslots(kvm, as_id, slots); in kvm_set_memslot()
1262 kvm_arch_commit_memory_region(kvm, mem, old, new, change); in kvm_set_memslot()
1269 slots = install_new_memslots(kvm, as_id, slots); in kvm_set_memslot()
1274 static int kvm_delete_memslot(struct kvm *kvm, in kvm_delete_memslot() argument
1292 r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE); in kvm_delete_memslot()
1296 kvm_free_memslot(kvm, old); in kvm_delete_memslot()
1308 int __kvm_set_memory_region(struct kvm *kvm, in __kvm_set_memory_region() argument
1347 tmp = id_to_memslot(__kvm_memslots(kvm, as_id), id); in __kvm_set_memory_region()
1357 return kvm_delete_memslot(kvm, mem, &old, as_id); in __kvm_set_memory_region()
1393 kvm_for_each_memslot(tmp, __kvm_memslots(kvm, as_id)) { in __kvm_set_memory_region()
1410 if (kvm_dirty_log_manual_protect_and_init_set(kvm)) in __kvm_set_memory_region()
1414 r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change); in __kvm_set_memory_region()
1429 int kvm_set_memory_region(struct kvm *kvm, in kvm_set_memory_region() argument
1434 mutex_lock(&kvm->slots_lock); in kvm_set_memory_region()
1435 r = __kvm_set_memory_region(kvm, mem); in kvm_set_memory_region()
1436 mutex_unlock(&kvm->slots_lock); in kvm_set_memory_region()
1441 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, in kvm_vm_ioctl_set_memory_region() argument
1447 return kvm_set_memory_region(kvm, mem); in kvm_vm_ioctl_set_memory_region()
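
__kvm_set_memory_region() is reached from userspace through the KVM_SET_USER_MEMORY_REGION vm ioctl; a zero memory_size deletes the slot (kvm_delete_memslot() above). A hedged userspace sketch; vm_fd, the slot number, addresses and sizes are assumptions.

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static int add_memslot(int vm_fd, __u32 slot, __u64 gpa, __u64 size)
{
        void *backing = mmap(NULL, size, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        struct kvm_userspace_memory_region region = {
                .slot            = slot,
                .flags           = KVM_MEM_LOG_DIRTY_PAGES,  /* enable dirty tracking */
                .guest_phys_addr = gpa,
                .memory_size     = size,
                .userspace_addr  = (__u64)(unsigned long)backing,
        };

        if (backing == MAP_FAILED)
                return -1;
        /* Lands in kvm_vm_ioctl_set_memory_region() -> __kvm_set_memory_region(). */
        return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}
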
1458 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, in kvm_get_dirty_log() argument
1474 slots = __kvm_memslots(kvm, as_id); in kvm_get_dirty_log()
1479 kvm_arch_sync_dirty_log(kvm, *memslot); in kvm_get_dirty_log()
1517 static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log) in kvm_get_dirty_log_protect() argument
1532 slots = __kvm_memslots(kvm, as_id); in kvm_get_dirty_log_protect()
1539 kvm_arch_sync_dirty_log(kvm, memslot); in kvm_get_dirty_log_protect()
1543 if (kvm->manual_dirty_log_protect) { in kvm_get_dirty_log_protect()
1557 spin_lock(&kvm->mmu_lock); in kvm_get_dirty_log_protect()
1570 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, in kvm_get_dirty_log_protect()
1573 spin_unlock(&kvm->mmu_lock); in kvm_get_dirty_log_protect()
1577 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); in kvm_get_dirty_log_protect()
1604 static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, in kvm_vm_ioctl_get_dirty_log() argument
1609 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
1611 r = kvm_get_dirty_log_protect(kvm, log); in kvm_vm_ioctl_get_dirty_log()
1613 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
1623 static int kvm_clear_dirty_log_protect(struct kvm *kvm, in kvm_clear_dirty_log_protect() argument
1643 slots = __kvm_memslots(kvm, as_id); in kvm_clear_dirty_log_protect()
1657 kvm_arch_sync_dirty_log(kvm, memslot); in kvm_clear_dirty_log_protect()
1664 spin_lock(&kvm->mmu_lock); in kvm_clear_dirty_log_protect()
1683 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, in kvm_clear_dirty_log_protect()
1687 spin_unlock(&kvm->mmu_lock); in kvm_clear_dirty_log_protect()
1690 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); in kvm_clear_dirty_log_protect()
1695 static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, in kvm_vm_ioctl_clear_dirty_log() argument
1700 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_clear_dirty_log()
1702 r = kvm_clear_dirty_log_protect(kvm, log); in kvm_vm_ioctl_clear_dirty_log()
1704 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_clear_dirty_log()
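
kvm_get_dirty_log_protect() and kvm_clear_dirty_log_protect() back the KVM_GET_DIRTY_LOG and KVM_CLEAR_DIRTY_LOG vm ioctls; with the manual-protect capability enabled (kvm->manual_dirty_log_protect above), GET only snapshots the bitmap and userspace clears the ranges it has handled. A hedged userspace sketch; vm_fd, slot and npages are assumptions.

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdlib.h>

static int fetch_and_clear_dirty(int vm_fd, __u32 slot, __u64 npages)
{
        /* KVM copies the bitmap out padded to 64-bit words. */
        size_t bytes = ((npages + 63) / 64) * 8;
        void *bitmap = calloc(1, bytes);
        struct kvm_dirty_log get = { .slot = slot };
        struct kvm_clear_dirty_log clear = {
                .slot = slot, .first_page = 0, .num_pages = npages,
        };
        int ret;

        if (!bitmap)
                return -1;
        get.dirty_bitmap = bitmap;

        ret = ioctl(vm_fd, KVM_GET_DIRTY_LOG, &get);    /* snapshot */
        if (!ret) {
                /* ... migrate/log the pages whose bits are set ... */
                clear.dirty_bitmap = bitmap;
                /*
                 * Only useful with KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 enabled;
                 * num_pages generally has to be 64-aligned or reach slot end.
                 */
                ret = ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clear);
        }
        free(bitmap);
        return ret;
}
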
1709 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) in gfn_to_memslot() argument
1711 return __gfn_to_memslot(kvm_memslots(kvm), gfn); in gfn_to_memslot()
1720 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) in kvm_is_visible_gfn() argument
1722 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); in kvm_is_visible_gfn()
1793 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) in gfn_to_hva() argument
1795 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); in gfn_to_hva()
1824 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) in gfn_to_hva_prot() argument
1826 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in gfn_to_hva_prot()
2089 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, in gfn_to_pfn_prot() argument
2092 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL, in gfn_to_pfn_prot()
2115 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) in gfn_to_pfn() argument
2117 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn); in gfn_to_pfn()
2157 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) in gfn_to_page() argument
2161 pfn = gfn_to_pfn(kvm, gfn); in gfn_to_page()
2250 return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map, in kvm_map_gfn()
2301 __kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map, in kvm_unmap_gfn()
2410 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, in kvm_read_guest_page() argument
2413 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in kvm_read_guest_page()
2428 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) in kvm_read_guest() argument
2436 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); in kvm_read_guest()
2512 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, in kvm_write_guest_page() argument
2515 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in kvm_write_guest_page()
2530 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, in kvm_write_guest() argument
2539 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); in kvm_write_guest()
2613 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_gfn_to_hva_cache_init() argument
2616 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_gfn_to_hva_cache_init()
2621 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_write_guest_offset_cached() argument
2625 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_write_guest_offset_cached()
2641 return kvm_write_guest(kvm, gpa, data, len); in kvm_write_guest_offset_cached()
2652 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_write_guest_cached() argument
2655 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len); in kvm_write_guest_cached()
2659 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_read_guest_offset_cached() argument
2663 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_read_guest_offset_cached()
2679 return kvm_read_guest(kvm, gpa, data, len); in kvm_read_guest_offset_cached()
2689 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_read_guest_cached() argument
2692 return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len); in kvm_read_guest_cached()
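
The *_cached helpers above avoid a memslot walk on every access: the cache is primed once with kvm_gfn_to_hva_cache_init() and revalidated automatically when the memslot generation changes, falling back to the uncached path when needed. A hedged sketch of typical in-kernel usage; my_shared_page, my_setup() and my_bump_counter() are invented, as is the guest address handed in.

#include <linux/kvm_host.h>

struct my_shared_page {
        u64 counter;
};

static int my_setup(struct kvm *kvm, struct gfn_to_hva_cache *ghc, gpa_t gpa)
{
        /* Resolve and remember the hva once; fails if gpa has no memslot. */
        return kvm_gfn_to_hva_cache_init(kvm, ghc, gpa,
                                         sizeof(struct my_shared_page));
}

static int my_bump_counter(struct kvm *kvm, struct gfn_to_hva_cache *ghc)
{
        struct my_shared_page page;
        int r;

        r = kvm_read_guest_cached(kvm, ghc, &page, sizeof(page));
        if (r)
                return r;
        page.counter++;
        /* Revalidates against the current memslot generation if stale. */
        return kvm_write_guest_cached(kvm, ghc, &page, sizeof(page));
}
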
2696 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len) in kvm_clear_guest_page() argument
2700 return kvm_write_guest_page(kvm, gfn, zero_page, offset, len); in kvm_clear_guest_page()
2704 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) in kvm_clear_guest() argument
2712 ret = kvm_clear_guest_page(kvm, gfn, offset, seg); in kvm_clear_guest()
2733 void mark_page_dirty(struct kvm *kvm, gfn_t gfn) in mark_page_dirty() argument
2737 memslot = gfn_to_memslot(kvm, gfn); in mark_page_dirty()
2788 if (val > vcpu->kvm->max_halt_poll_ns) in grow_halt_poll_ns()
2789 val = vcpu->kvm->max_halt_poll_ns; in grow_halt_poll_ns()
2818 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_vcpu_check_block()
2831 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_vcpu_check_block()
2898 } else if (vcpu->kvm->max_halt_poll_ns) { in kvm_vcpu_block()
2903 block_ns > vcpu->kvm->max_halt_poll_ns) in kvm_vcpu_block()
2906 else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns && in kvm_vcpu_block()
2907 block_ns < vcpu->kvm->max_halt_poll_ns) in kvm_vcpu_block()
3039 struct kvm *kvm = me->kvm; in kvm_vcpu_on_spin() local
3041 int last_boosted_vcpu = me->kvm->last_boosted_vcpu; in kvm_vcpu_on_spin()
3056 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_vcpu_on_spin()
3077 kvm->last_boosted_vcpu = i; in kvm_vcpu_on_spin()
3106 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); in kvm_vcpu_fault()
3129 kvm_put_kvm(vcpu->kvm); in kvm_vcpu_release()
3163 vcpu->kvm->debugfs_dentry); in kvm_create_vcpu_debugfs()
3172 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) in kvm_vm_ioctl_create_vcpu() argument
3181 mutex_lock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
3182 if (kvm->created_vcpus == KVM_MAX_VCPUS) { in kvm_vm_ioctl_create_vcpu()
3183 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
3187 kvm->created_vcpus++; in kvm_vm_ioctl_create_vcpu()
3188 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
3190 r = kvm_arch_vcpu_precreate(kvm, id); in kvm_vm_ioctl_create_vcpu()
3208 kvm_vcpu_init(vcpu, kvm, id); in kvm_vm_ioctl_create_vcpu()
3214 mutex_lock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
3215 if (kvm_get_vcpu_by_id(kvm, id)) { in kvm_vm_ioctl_create_vcpu()
3220 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus); in kvm_vm_ioctl_create_vcpu()
3221 BUG_ON(kvm->vcpus[vcpu->vcpu_idx]); in kvm_vm_ioctl_create_vcpu()
3224 kvm_get_kvm(kvm); in kvm_vm_ioctl_create_vcpu()
3227 kvm_put_kvm_no_destroy(kvm); in kvm_vm_ioctl_create_vcpu()
3231 kvm->vcpus[vcpu->vcpu_idx] = vcpu; in kvm_vm_ioctl_create_vcpu()
3238 atomic_inc(&kvm->online_vcpus); in kvm_vm_ioctl_create_vcpu()
3240 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
3246 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
3253 mutex_lock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
3254 kvm->created_vcpus--; in kvm_vm_ioctl_create_vcpu()
3255 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
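
kvm_vm_ioctl_create_vcpu() backs the KVM_CREATE_VCPU vm ioctl; the returned vcpu fd is also where the shared kvm_run page is mmap()ed from. A hedged userspace sketch; kvm_fd, vm_fd and the vcpu id are assumptions.

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static int create_vcpu(int kvm_fd, int vm_fd, unsigned int id,
                       struct kvm_run **run_out)
{
        int vcpu_fd, mmap_size;

        /* Dispatched to kvm_vm_ioctl_create_vcpu(); id must be unused in this VM. */
        vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, id);
        if (vcpu_fd < 0)
                return -1;

        /* The kvm_run communication page is mmap()ed from the vcpu fd. */
        mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
        if (mmap_size < (int)sizeof(struct kvm_run))
                return -1;

        *run_out = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                        MAP_SHARED, vcpu_fd, 0);
        if (*run_out == MAP_FAILED)
                return -1;

        return vcpu_fd;
}
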
3279 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_bugged) in kvm_vcpu_ioctl()
3485 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_bugged) in kvm_vcpu_compat_ioctl()
3551 if (dev->kvm->mm != current->mm || dev->kvm->vm_bugged) in kvm_device_ioctl()
3572 struct kvm *kvm = dev->kvm; in kvm_device_release() local
3575 mutex_lock(&kvm->lock); in kvm_device_release()
3578 mutex_unlock(&kvm->lock); in kvm_device_release()
3581 kvm_put_kvm(kvm); in kvm_device_release()
3625 static int kvm_ioctl_create_device(struct kvm *kvm, in kvm_ioctl_create_device() argument
3650 dev->kvm = kvm; in kvm_ioctl_create_device()
3652 mutex_lock(&kvm->lock); in kvm_ioctl_create_device()
3655 mutex_unlock(&kvm->lock); in kvm_ioctl_create_device()
3659 list_add(&dev->vm_node, &kvm->devices); in kvm_ioctl_create_device()
3660 mutex_unlock(&kvm->lock); in kvm_ioctl_create_device()
3665 kvm_get_kvm(kvm); in kvm_ioctl_create_device()
3668 kvm_put_kvm_no_destroy(kvm); in kvm_ioctl_create_device()
3669 mutex_lock(&kvm->lock); in kvm_ioctl_create_device()
3673 mutex_unlock(&kvm->lock); in kvm_ioctl_create_device()
3683 static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) in kvm_vm_ioctl_check_extension_generic() argument
3725 return kvm_vm_ioctl_check_extension(kvm, arg); in kvm_vm_ioctl_check_extension_generic()
3728 int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm, in kvm_vm_ioctl_enable_cap() argument
3734 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm, in kvm_vm_ioctl_enable_cap_generic() argument
3747 kvm->manual_dirty_log_protect = cap->args[0]; in kvm_vm_ioctl_enable_cap_generic()
3755 kvm->max_halt_poll_ns = cap->args[0]; in kvm_vm_ioctl_enable_cap_generic()
3759 return kvm_vm_ioctl_enable_cap(kvm, cap); in kvm_vm_ioctl_enable_cap_generic()
3766 struct kvm *kvm = filp->private_data; in kvm_vm_ioctl() local
3770 if (kvm->mm != current->mm || kvm->vm_bugged) in kvm_vm_ioctl()
3774 r = kvm_vm_ioctl_create_vcpu(kvm, arg); in kvm_vm_ioctl()
3782 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap); in kvm_vm_ioctl()
3793 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); in kvm_vm_ioctl()
3802 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); in kvm_vm_ioctl()
3812 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); in kvm_vm_ioctl()
3823 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); in kvm_vm_ioctl()
3832 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); in kvm_vm_ioctl()
3842 r = kvm_irqfd(kvm, &data); in kvm_vm_ioctl()
3851 r = kvm_ioeventfd(kvm, &data); in kvm_vm_ioctl()
3861 r = kvm_send_userspace_msi(kvm, &msi); in kvm_vm_ioctl()
3874 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, in kvm_vm_ioctl()
3899 if (!kvm_arch_can_set_irq_routing(kvm)) in kvm_vm_ioctl()
3915 r = kvm_set_irq_routing(kvm, entries, routing.nr, in kvm_vm_ioctl()
3928 r = kvm_ioctl_create_device(kvm, &cd); in kvm_vm_ioctl()
3940 r = kvm_vm_ioctl_check_extension_generic(kvm, arg); in kvm_vm_ioctl()
3978 struct kvm *kvm = filp->private_data; in kvm_vm_compat_ioctl() local
3981 if (kvm->mm != current->mm || kvm->vm_bugged) in kvm_vm_compat_ioctl()
4003 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); in kvm_vm_compat_ioctl()
4019 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); in kvm_vm_compat_ioctl()
4039 struct kvm *kvm; in kvm_dev_ioctl_create_vm() local
4042 kvm = kvm_create_vm(type); in kvm_dev_ioctl_create_vm()
4043 if (IS_ERR(kvm)) in kvm_dev_ioctl_create_vm()
4044 return PTR_ERR(kvm); in kvm_dev_ioctl_create_vm()
4046 r = kvm_coalesced_mmio_init(kvm); in kvm_dev_ioctl_create_vm()
4054 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); in kvm_dev_ioctl_create_vm()
4067 if (kvm_create_vm_debugfs(kvm, r) < 0) { in kvm_dev_ioctl_create_vm()
4072 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm); in kvm_dev_ioctl_create_vm()
4078 kvm_put_kvm(kvm); in kvm_dev_ioctl_create_vm()
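
kvm_dev_ioctl_create_vm() backs KVM_CREATE_VM on /dev/kvm and hands back an anonymous-inode fd whose ioctls land in kvm_vm_ioctl(). A hedged userspace sketch of that handshake; error handling is kept minimal.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int create_vm(void)
{
        int kvm_fd, vm_fd;

        kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
        if (kvm_fd < 0)
                return -1;

        /* Sanity-check the ABI before anything else. */
        if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION) {
                close(kvm_fd);
                return -1;
        }

        /* type = 0 selects the default machine type on most architectures. */
        vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);

        /* The VM fd keeps the module pinned; /dev/kvm can be closed now. */
        close(kvm_fd);
        return vm_fd;
}
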
4338 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_write()
4358 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_write_cookie()
4409 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_read()
4417 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, in kvm_io_bus_register_dev() argument
4424 bus = kvm_get_bus(kvm, bus_idx); in kvm_io_bus_register_dev()
4452 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); in kvm_io_bus_register_dev()
4453 synchronize_srcu_expedited(&kvm->srcu); in kvm_io_bus_register_dev()
4460 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, in kvm_io_bus_unregister_dev() argument
4466 bus = kvm_get_bus(kvm, bus_idx); in kvm_io_bus_unregister_dev()
4487 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); in kvm_io_bus_unregister_dev()
4488 synchronize_srcu_expedited(&kvm->srcu); in kvm_io_bus_unregister_dev()
4504 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, in kvm_io_bus_get_dev() argument
4511 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_io_bus_get_dev()
4513 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); in kvm_io_bus_get_dev()
4524 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_io_bus_get_dev()
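
In-kernel devices attach to the per-VM buses through struct kvm_io_device; registration runs under kvm->slots_lock, and the bus array itself is republished with the same SRCU copy-update scheme as the memslots. A hedged sketch of registering a read-only MMIO device; my_dev, MY_DEV_ADDR and the handler are invented, the kvm_io*() calls are real.

#include <linux/kvm_host.h>
#include <linux/string.h>
#include <kvm/iodev.h>

#define MY_DEV_ADDR     0xfed00000UL    /* invented MMIO address */
#define MY_DEV_LEN      4

struct my_dev {
        struct kvm_io_device dev;
        u32 reg;
};

static int my_dev_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
                       gpa_t addr, int len, void *val)
{
        struct my_dev *d = container_of(this, struct my_dev, dev);

        memcpy(val, &d->reg, len);
        return 0;                       /* 0 = handled, -EOPNOTSUPP = not ours */
}

static const struct kvm_io_device_ops my_dev_ops = {
        .read = my_dev_read,            /* no .write: writes get -EOPNOTSUPP */
};

static int my_dev_register(struct kvm *kvm, struct my_dev *d)
{
        int r;

        kvm_iodevice_init(&d->dev, &my_dev_ops);

        mutex_lock(&kvm->slots_lock);   /* serializes bus rebuilds */
        r = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, MY_DEV_ADDR,
                                    MY_DEV_LEN, &d->dev);
        mutex_unlock(&kvm->slots_lock);
        return r;
}
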
4542 if (!refcount_inc_not_zero(&stat_data->kvm->users_count)) in kvm_debugfs_open()
4549 kvm_put_kvm(stat_data->kvm); in kvm_debugfs_open()
4562 kvm_put_kvm(stat_data->kvm); in kvm_debugfs_release()
4567 static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val) in kvm_get_stat_per_vm() argument
4569 *val = *(ulong *)((void *)kvm + offset); in kvm_get_stat_per_vm()
4574 static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset) in kvm_clear_stat_per_vm() argument
4576 *(ulong *)((void *)kvm + offset) = 0; in kvm_clear_stat_per_vm()
4581 static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val) in kvm_get_stat_per_vcpu() argument
4588 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_get_stat_per_vcpu()
4594 static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset) in kvm_clear_stat_per_vcpu() argument
4599 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_clear_stat_per_vcpu()
4612 r = kvm_get_stat_per_vm(stat_data->kvm, in kvm_stat_data_get()
4616 r = kvm_get_stat_per_vcpu(stat_data->kvm, in kvm_stat_data_get()
4634 r = kvm_clear_stat_per_vm(stat_data->kvm, in kvm_stat_data_clear()
4638 r = kvm_clear_stat_per_vcpu(stat_data->kvm, in kvm_stat_data_clear()
4665 struct kvm *kvm; in vm_stat_get() local
4670 list_for_each_entry(kvm, &vm_list, vm_list) { in vm_stat_get()
4671 kvm_get_stat_per_vm(kvm, offset, &tmp_val); in vm_stat_get()
4681 struct kvm *kvm; in vm_stat_clear() local
4687 list_for_each_entry(kvm, &vm_list, vm_list) { in vm_stat_clear()
4688 kvm_clear_stat_per_vm(kvm, offset); in vm_stat_clear()
4700 struct kvm *kvm; in vcpu_stat_get() local
4705 list_for_each_entry(kvm, &vm_list, vm_list) { in vcpu_stat_get()
4706 kvm_get_stat_per_vcpu(kvm, offset, &tmp_val); in vcpu_stat_get()
4716 struct kvm *kvm; in vcpu_stat_clear() local
4722 list_for_each_entry(kvm, &vm_list, vm_list) { in vcpu_stat_clear()
4723 kvm_clear_stat_per_vcpu(kvm, offset); in vcpu_stat_clear()
4738 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm) in kvm_uevent_notify_change() argument
4743 if (!kvm_dev.this_device || !kvm) in kvm_uevent_notify_change()
4766 kvm->userspace_pid = task_pid_nr(current); in kvm_uevent_notify_change()
4770 add_uevent_var(env, "PID=%d", kvm->userspace_pid); in kvm_uevent_notify_change()
4772 if (kvm->debugfs_dentry) { in kvm_uevent_notify_change()
4776 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX); in kvm_uevent_notify_change()
5020 struct kvm *kvm; member
5035 struct kvm *kvm = init_context->kvm; in kvm_vm_worker_thread() local
5067 err = thread_fn(kvm, data); in kvm_vm_worker_thread()
5072 int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, in kvm_vm_create_worker_thread() argument
5080 init_context.kvm = kvm; in kvm_vm_create_worker_thread()