Lines Matching refs:kvm

192 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
194 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
196 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
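The kvm_io_bus_* entries above are the in-kernel MMIO/PIO dispatch API. A minimal sketch of registering a write-only device on KVM_MMIO_BUS, assuming a hypothetical device, address and length (struct kvm_io_device, kvm_io_device_ops and kvm_iodevice_init() come from <kvm/iodev.h>):

    static int demo_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                               gpa_t addr, int len, const void *val)
    {
        return 0;                       /* accept and discard the write */
    }

    static const struct kvm_io_device_ops demo_ops = {
        .write = demo_mmio_write,       /* .read left NULL: reads get -EOPNOTSUPP */
    };

    static struct kvm_io_device demo_dev;

    static int demo_register(struct kvm *kvm)
    {
        int ret;

        kvm_iodevice_init(&demo_dev, &demo_ops);
        mutex_lock(&kvm->slots_lock);   /* held around bus (un)registration */
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, 0xfe000000, 8, &demo_dev);
        mutex_unlock(&kvm->slots_lock);
        return ret;
    }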
264 struct kvm *kvm; member
386 struct kvm *kvm, int irq_source_id, int level,
447 struct kvm { struct
538 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
539 static inline void kvm_vm_bugged(struct kvm *kvm) in kvm_vm_bugged() argument
541 kvm->vm_bugged = true; in kvm_vm_bugged()
542 kvm_make_all_cpus_request(kvm, KVM_REQ_VM_BUGGED); in kvm_vm_bugged()
545 #define KVM_BUG(cond, kvm, fmt...) \ argument
549 if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \
550 kvm_vm_bugged(kvm); \
554 #define KVM_BUG_ON(cond, kvm) \ argument
558 if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
559 kvm_vm_bugged(kvm); \
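KVM_BUG() and KVM_BUG_ON() evaluate to the tested condition, so callers can mark the VM bugged and bail out in one step; once vm_bugged is set, subsequent VM and vCPU ioctls fail with -EIO. A minimal sketch (the invariant is hypothetical):

    if (KVM_BUG_ON(!vcpu->arch.demo_state, vcpu->kvm))
        return -EIO;    /* VM is now bugged, KVM_REQ_VM_BUGGED sent to all vCPUs */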
563 static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm) in kvm_dirty_log_manual_protect_and_init_set() argument
565 return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET); in kvm_dirty_log_manual_protect_and_init_set()
568 static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx) in kvm_get_bus() argument
570 return srcu_dereference_check(kvm->buses[idx], &kvm->srcu, in kvm_get_bus()
571 lockdep_is_held(&kvm->slots_lock) || in kvm_get_bus()
572 !refcount_read(&kvm->users_count)); in kvm_get_bus()
575 static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) in kvm_get_vcpu() argument
577 int num_vcpus = atomic_read(&kvm->online_vcpus); in kvm_get_vcpu()
582 return kvm->vcpus[i]; in kvm_get_vcpu()
585 #define kvm_for_each_vcpu(idx, vcpup, kvm) \ argument
587 idx < atomic_read(&kvm->online_vcpus) && \
588 (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
591 static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) in kvm_get_vcpu_by_id() argument
599 vcpu = kvm_get_vcpu(kvm, id); in kvm_get_vcpu_by_id()
602 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_get_vcpu_by_id()
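kvm_for_each_vcpu() walks the online vCPUs by index; for example, to kick every vCPU out of guest mode:

    struct kvm_vcpu *vcpu;
    int i;

    kvm_for_each_vcpu(i, vcpu, kvm)
        kvm_vcpu_kick(vcpu);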
625 void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
626 void kvm_arch_post_irq_routing_update(struct kvm *kvm);
628 static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm) in kvm_arch_post_irq_ack_notifier_list_update() argument
631 static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm) in kvm_arch_post_irq_routing_update() argument
653 void kvm_get_kvm(struct kvm *kvm);
654 void kvm_put_kvm(struct kvm *kvm);
655 void kvm_put_kvm_no_destroy(struct kvm *kvm);
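kvm_get_kvm()/kvm_put_kvm() pin the VM so asynchronous work can outlive the ioctl that scheduled it. A sketch with a hypothetical worker:

    kvm_get_kvm(kvm);               /* reference owned by the worker */
    if (!queue_demo_work(kvm)) {    /* hypothetical; nothing queued on failure */
        kvm_put_kvm(kvm);
        return -ENOMEM;
    }
    /* ... the worker calls kvm_put_kvm(kvm) once it is done with the VM ... */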
657 static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id) in __kvm_memslots() argument
660 return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu, in __kvm_memslots()
661 lockdep_is_held(&kvm->slots_lock) || in __kvm_memslots()
662 !refcount_read(&kvm->users_count)); in __kvm_memslots()
665 static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm) in kvm_memslots() argument
667 return __kvm_memslots(kvm, 0); in kvm_memslots()
674 return __kvm_memslots(vcpu->kvm, as_id); in kvm_vcpu_memslots()
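kvm_memslots() must be called with kvm->srcu held (or slots_lock, per the check in __kvm_memslots()). A sketch walking the slots of address space 0:

    struct kvm_memslots *slots;
    struct kvm_memory_slot *memslot;
    int idx;

    idx = srcu_read_lock(&kvm->srcu);
    slots = kvm_memslots(kvm);
    kvm_for_each_memslot(memslot, slots)
        pr_debug("slot %d: base_gfn %llx npages %lu\n",
                 memslot->id, memslot->base_gfn, memslot->npages);
    srcu_read_unlock(&kvm->srcu, idx);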
710 int kvm_set_memory_region(struct kvm *kvm,
712 int __kvm_set_memory_region(struct kvm *kvm,
714 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
715 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
716 int kvm_arch_prepare_memory_region(struct kvm *kvm,
720 void kvm_arch_commit_memory_region(struct kvm *kvm,
726 void kvm_arch_flush_shadow_all(struct kvm *kvm);
728 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
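kvm_set_memory_region() is the in-kernel path behind KVM_SET_USER_MEMORY_REGION and takes slots_lock itself; __kvm_set_memory_region() expects the caller to already hold it. A sketch creating a hypothetical internal slot (hva is a host mapping prepared elsewhere):

    struct kvm_userspace_memory_region mem = {
        .slot            = 10,          /* hypothetical slot id */
        .guest_phys_addr = 0xfee00000,  /* hypothetical GPA */
        .memory_size     = PAGE_SIZE,
        .userspace_addr  = hva,
    };

    ret = kvm_set_memory_region(kvm, &mem);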
734 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
735 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
736 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
744 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
745 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
760 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
762 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
763 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
765 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
768 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
770 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
772 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
774 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
777 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
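For guest memory that is touched repeatedly, the *_cached variants reuse a gfn_to_hva_cache instead of re-resolving the gfn on every access. A sketch, assuming gpa points at a hypothetical shared structure:

    struct gfn_to_hva_cache ghc;
    u64 tick = 0;

    if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(tick)))
        return -EFAULT;                 /* gpa not backed by a memslot */

    tick++;
    kvm_write_guest_cached(kvm, &ghc, &tick, sizeof(tick));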
780 #define __kvm_get_guest(kvm, gfn, offset, v) \ argument
782 unsigned long __addr = gfn_to_hva(kvm, gfn); \
791 #define kvm_get_guest(kvm, gpa, v) \ argument
794 struct kvm *__kvm = kvm; \
800 #define __kvm_put_guest(kvm, gfn, offset, v) \ argument
802 unsigned long __addr = gfn_to_hva(kvm, gfn); \
809 mark_page_dirty(kvm, gfn); \
813 #define kvm_put_guest(kvm, gpa, v) \ argument
816 struct kvm *__kvm = kvm; \
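kvm_put_guest() stores one naturally sized value (the access width comes from the type of v) and marks the page dirty via mark_page_dirty(); kvm_get_guest() is the read-side counterpart. A sketch with a hypothetical status word:

    u32 status = 1;

    if (kvm_put_guest(kvm, gpa, status))
        return -EFAULT;                 /* no writable memslot behind gpa */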
822 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
823 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
824 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
825 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
829 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
867 void kvm_flush_remote_tlbs(struct kvm *kvm);
868 void kvm_reload_remote_mmus(struct kvm *kvm);
877 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
880 bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
882 bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
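kvm_make_all_cpus_request() sets a request bit on every vCPU and kicks them; each vCPU consumes the bit with kvm_check_request() before reentering the guest. A sketch with a hypothetical request:

    /* producer side */
    kvm_make_all_cpus_request(kvm, KVM_REQ_DEMO);   /* KVM_REQ_DEMO is hypothetical */

    /* consumer side, in the vcpu run loop */
    if (kvm_check_request(KVM_REQ_DEMO, vcpu))
        demo_handle_request(vcpu);                  /* hypothetical handler */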
891 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
893 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
897 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
900 void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
903 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
904 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
908 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
910 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
944 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
962 int kvm_arch_post_init_vm(struct kvm *kvm);
963 void kvm_arch_pre_destroy_vm(struct kvm *kvm);
970 static inline struct kvm *kvm_arch_alloc_vm(void) in kvm_arch_alloc_vm()
972 return kzalloc(sizeof(struct kvm), GFP_KERNEL); in kvm_arch_alloc_vm()
975 static inline void kvm_arch_free_vm(struct kvm *kvm) in kvm_arch_free_vm() argument
977 kfree(kvm); in kvm_arch_free_vm()
982 static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm) in kvm_arch_flush_remote_tlb() argument
989 void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
990 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
991 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
993 static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm) in kvm_arch_register_noncoherent_dma() argument
997 static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm) in kvm_arch_unregister_noncoherent_dma() argument
1001 static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) in kvm_arch_has_noncoherent_dma() argument
1007 void kvm_arch_start_assignment(struct kvm *kvm);
1008 void kvm_arch_end_assignment(struct kvm *kvm);
1009 bool kvm_arch_has_assigned_device(struct kvm *kvm);
1011 static inline void kvm_arch_start_assignment(struct kvm *kvm) in kvm_arch_start_assignment() argument
1015 static inline void kvm_arch_end_assignment(struct kvm *kvm) in kvm_arch_end_assignment() argument
1019 static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm) in kvm_arch_has_assigned_device() argument
1040 bool kvm_arch_intc_initialized(struct kvm *kvm);
1042 static inline bool kvm_arch_intc_initialized(struct kvm *kvm) in kvm_arch_intc_initialized() argument
1048 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
1049 void kvm_arch_destroy_vm(struct kvm *kvm);
1050 void kvm_arch_sync_events(struct kvm *kvm);
1064 int kvm_irq_map_gsi(struct kvm *kvm,
1066 int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);
1068 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
1070 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
1073 struct kvm *kvm, int irq_source_id,
1075 bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
1076 void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
1077 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
1078 void kvm_register_irq_ack_notifier(struct kvm *kvm,
1080 void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
1082 int kvm_request_irq_source_id(struct kvm *kvm);
1083 void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
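In-kernel interrupt sources allocate their own source id and pass it to kvm_set_irq(); a sketch asserting and deasserting a hypothetical GSI:

    int src = kvm_request_irq_source_id(kvm);

    if (src < 0)
        return src;
    kvm_set_irq(kvm, src, gsi, 1, false);   /* assert */
    kvm_set_irq(kvm, src, gsi, 0, false);   /* deassert */
    kvm_free_irq_source_id(kvm, src);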
1084 bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
1146 static inline int memslot_id(struct kvm *kvm, gfn_t gfn) in memslot_id() argument
1148 return gfn_to_memslot(kvm, gfn)->id; in memslot_id()
1180 static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa) in kvm_is_error_gpa() argument
1182 unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa)); in kvm_is_error_gpa()
1193 struct kvm *kvm; member
1208 { n, offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__ }
1216 static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq) in mmu_notifier_retry() argument
1218 if (unlikely(kvm->mmu_notifier_count)) in mmu_notifier_retry()
1231 if (kvm->mmu_notifier_seq != mmu_seq) in mmu_notifier_retry()
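mmu_notifier_retry() is used by page fault handlers to detect a racing invalidation: sample mmu_notifier_seq before translating the gfn, then recheck under mmu_lock before installing the mapping. A sketch of the pattern (shown with a spinlock; some architectures use an rwlock for mmu_lock):

    unsigned long mmu_seq = kvm->mmu_notifier_seq;
    smp_rmb();                              /* pairs with the notifier's update */

    pfn = gfn_to_pfn(kvm, gfn);

    spin_lock(&kvm->mmu_lock);
    if (mmu_notifier_retry(kvm, mmu_seq)) {
        spin_unlock(&kvm->mmu_lock);
        return RET_RETRY;                   /* hypothetical "retry the fault" code */
    }
    /* ... install pfn in the stage-2 / shadow page tables ... */
    spin_unlock(&kvm->mmu_lock);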
1241 bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
1242 int kvm_set_irq_routing(struct kvm *kvm,
1246 int kvm_set_routing_entry(struct kvm *kvm,
1249 void kvm_free_irq_routing(struct kvm *kvm);
1253 static inline void kvm_free_irq_routing(struct kvm *kvm) {} in kvm_free_irq_routing() argument
1257 int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
1261 void kvm_eventfd_init(struct kvm *kvm);
1262 int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
1265 int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
1266 void kvm_irqfd_release(struct kvm *kvm);
1267 void kvm_irq_routing_update(struct kvm *);
1269 static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) in kvm_irqfd() argument
1274 static inline void kvm_irqfd_release(struct kvm *kvm) {} in kvm_irqfd_release() argument
1279 static inline void kvm_eventfd_init(struct kvm *kvm) {} in kvm_eventfd_init() argument
1281 static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) in kvm_irqfd() argument
1286 static inline void kvm_irqfd_release(struct kvm *kvm) {} in kvm_irqfd_release() argument
1289 static inline void kvm_irq_routing_update(struct kvm *kvm) in kvm_irq_routing_update() argument
1294 static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) in kvm_ioeventfd() argument
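kvm_ioeventfd() and kvm_irqfd() back the KVM_IOEVENTFD and KVM_IRQFD VM ioctls. From userspace, registering an MMIO doorbell eventfd looks roughly like this (address and fds hypothetical; needs <linux/kvm.h> and <sys/ioctl.h>):

    struct kvm_ioeventfd ioeventfd = {
        .addr  = 0xfe000000,    /* hypothetical doorbell GPA */
        .len   = 4,
        .fd    = efd,           /* eventfd created by the VMM */
        .flags = 0,
    };

    if (ioctl(vm_fd, KVM_IOEVENTFD, &ioeventfd))
        perror("KVM_IOEVENTFD");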
1301 void kvm_arch_irq_routing_update(struct kvm *kvm);
1353 struct kvm *kvm; member
1451 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
1491 void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
1494 void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
1505 typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);
1507 int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,