Lines matching references to kvm in arch/x86/kvm/mmu/tdp_mmu.c (left column: line number in that file; trailing annotation: whether the reference is a function argument or a local variable)
25 void kvm_mmu_init_tdp_mmu(struct kvm *kvm) in kvm_mmu_init_tdp_mmu() argument
31 kvm->arch.tdp_mmu_enabled = true; in kvm_mmu_init_tdp_mmu()
33 INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots); in kvm_mmu_init_tdp_mmu()
34 INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages); in kvm_mmu_init_tdp_mmu()
37 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) in kvm_mmu_uninit_tdp_mmu() argument
39 if (!kvm->arch.tdp_mmu_enabled) in kvm_mmu_uninit_tdp_mmu()
42 WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots)); in kvm_mmu_uninit_tdp_mmu()
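
Taken together, the kvm_mmu_init_tdp_mmu() and kvm_mmu_uninit_tdp_mmu() references above describe the per-VM setup and teardown: a flag is latched on, two list heads are initialised, and teardown only checks that every root was already released. A minimal reconstruction of the two hooks, filling in the lines the search does not show; the early-return guard on the module-level TDP switches is an assumption, not visible in this listing:

void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
	/* Assumed guard: only enable the TDP MMU when TDP itself is in use. */
	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
		return;

	/* This must not change for the lifetime of the VM. */
	kvm->arch.tdp_mmu_enabled = true;

	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
}

void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
	if (!kvm->arch.tdp_mmu_enabled)
		return;

	/* Every TDP MMU root should have been freed by now. */
	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
}
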
45 static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root) in tdp_mmu_put_root() argument
47 if (kvm_mmu_put_root(kvm, root)) in tdp_mmu_put_root()
48 kvm_tdp_mmu_free_root(kvm, root); in tdp_mmu_put_root()
51 static inline bool tdp_mmu_next_root_valid(struct kvm *kvm, in tdp_mmu_next_root_valid() argument
54 lockdep_assert_held(&kvm->mmu_lock); in tdp_mmu_next_root_valid()
56 if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link)) in tdp_mmu_next_root_valid()
59 kvm_mmu_get_root(kvm, root); in tdp_mmu_next_root_valid()
64 static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm, in tdp_mmu_next_root() argument
70 tdp_mmu_put_root(kvm, root); in tdp_mmu_next_root()
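
The next cluster (tdp_mmu_put_root(), tdp_mmu_next_root_valid(), tdp_mmu_next_root()) is the reference-counted walk over kvm->arch.tdp_mmu_roots that backs the for_each_tdp_mmu_root_yield_safe() loops used later in the listing. A sketch of how the two iterator helpers plausibly compose into that macro; the macro body itself is reconstructed from its callers and is not shown by this search:

static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
					   struct kvm_mmu_page *root)
{
	lockdep_assert_held(&kvm->mmu_lock);

	/* Stop once the walk wraps back to the list head. */
	if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
		return false;

	/* Pin the root so it survives if the loop body drops mmu_lock. */
	kvm_mmu_get_root(kvm, root);
	return true;
}

static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
						     struct kvm_mmu_page *root)
{
	struct kvm_mmu_page *next_root;

	next_root = list_next_entry(root, link);
	tdp_mmu_put_root(kvm, root);	/* may free the root just walked */
	return next_root;
}

/* Assumed shape of the iterator macro built on the helpers above. */
#define for_each_tdp_mmu_root_yield_safe(_kvm, _root)			\
	for (_root = list_first_entry(&_kvm->arch.tdp_mmu_roots,	\
				      typeof(*_root), link);		\
	     tdp_mmu_next_root_valid(_kvm, _root);			\
	     _root = tdp_mmu_next_root(_kvm, _root))
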
89 bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa) in is_tdp_mmu_root() argument
93 if (!kvm->arch.tdp_mmu_enabled) in is_tdp_mmu_root()
105 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
108 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root) in kvm_tdp_mmu_free_root() argument
112 lockdep_assert_held(&kvm->mmu_lock); in kvm_tdp_mmu_free_root()
119 zap_gfn_range(kvm, root, 0, max_gfn, false, false); in kvm_tdp_mmu_free_root()
158 struct kvm *kvm = vcpu->kvm; in get_tdp_mmu_vcpu_root() local
163 spin_lock(&kvm->mmu_lock); in get_tdp_mmu_vcpu_root()
166 for_each_tdp_mmu_root(kvm, root) { in get_tdp_mmu_vcpu_root()
168 kvm_mmu_get_root(kvm, root); in get_tdp_mmu_vcpu_root()
169 spin_unlock(&kvm->mmu_lock); in get_tdp_mmu_vcpu_root()
177 list_add(&root->link, &kvm->arch.tdp_mmu_roots); in get_tdp_mmu_vcpu_root()
179 spin_unlock(&kvm->mmu_lock); in get_tdp_mmu_vcpu_root()
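
The get_tdp_mmu_vcpu_root() references show the lock/search/unlock pattern for root reuse: under kvm->mmu_lock, an existing root with a matching role is taken with kvm_mmu_get_root(); otherwise a new root is allocated and added to kvm->arch.tdp_mmu_roots. A hedged reconstruction; the role comparison, the page_role_for_level()/alloc_tdp_mmu_page() helpers and the initial root_count are assumptions filled in around the listed lines:

static struct kvm_mmu_page *get_tdp_mmu_vcpu_root(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_page_role role;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_page *root;

	role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);

	spin_lock(&kvm->mmu_lock);

	/* Reuse an existing root before allocating a new one. */
	for_each_tdp_mmu_root(kvm, root) {
		if (root->role.word == role.word) {
			kvm_mmu_get_root(kvm, root);
			spin_unlock(&kvm->mmu_lock);
			return root;
		}
	}

	root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
	root->root_count = 1;

	list_add(&root->link, &kvm->arch.tdp_mmu_roots);

	spin_unlock(&kvm->mmu_lock);

	return root;
}
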
195 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
215 static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn, in handle_changed_spte_dirty_log() argument
228 slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn); in handle_changed_spte_dirty_log()
245 static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, in __handle_changed_spte() argument
325 unaccount_huge_nx_page(kvm, sp); in __handle_changed_spte()
330 handle_changed_spte(kvm, as_id, in __handle_changed_spte()
335 kvm_flush_remote_tlbs_with_address(kvm, gfn, in __handle_changed_spte()
343 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, in handle_changed_spte() argument
346 __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level); in handle_changed_spte()
348 handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte, in handle_changed_spte()
352 static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, in __tdp_mmu_set_spte() argument
362 __handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte, in __tdp_mmu_set_spte()
368 handle_changed_spte_dirty_log(kvm, as_id, iter->gfn, in __tdp_mmu_set_spte()
373 static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, in tdp_mmu_set_spte() argument
376 __tdp_mmu_set_spte(kvm, iter, new_spte, true, true); in tdp_mmu_set_spte()
379 static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm, in tdp_mmu_set_spte_no_acc_track() argument
383 __tdp_mmu_set_spte(kvm, iter, new_spte, false, true); in tdp_mmu_set_spte_no_acc_track()
386 static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm, in tdp_mmu_set_spte_no_dirty_log() argument
390 __tdp_mmu_set_spte(kvm, iter, new_spte, true, false); in tdp_mmu_set_spte_no_dirty_log()
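
The three thin wrappers above differ only in the two booleans passed to __tdp_mmu_set_spte(), which the listed call sites show as (true, true), (false, true) and (true, false). A sketch of the underlying helper that makes those flags legible: the first controls access-tracking updates, the second dirty-log updates. Parameter names and the tdp_iter_root_pt()/sptep_to_sp()/handle_changed_spte_acc_track() helpers are assumptions consistent with the wrapper names:

static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
				      u64 new_spte, bool record_acc_track,
				      bool record_dirty_log)
{
	struct kvm_mmu_page *root = sptep_to_sp(tdp_iter_root_pt(iter));
	int as_id = kvm_mmu_page_as_id(root);

	WRITE_ONCE(*iter->sptep, new_spte);

	__handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
			      iter->level);
	if (record_acc_track)
		handle_changed_spte_acc_track(iter->old_spte, new_spte,
					      iter->level);
	if (record_dirty_log)
		handle_changed_spte_dirty_log(kvm, as_id, iter->gfn,
					      iter->old_spte, new_spte,
					      iter->level);
}

/* e.g. tdp_mmu_set_spte_no_acc_track() == __tdp_mmu_set_spte(..., false, true) */
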
422 static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm, in tdp_mmu_iter_cond_resched() argument
429 if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { in tdp_mmu_iter_cond_resched()
431 kvm_flush_remote_tlbs(kvm); in tdp_mmu_iter_cond_resched()
433 cond_resched_lock(&kvm->mmu_lock); in tdp_mmu_iter_cond_resched()
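
tdp_mmu_iter_cond_resched() is the pattern that lets the long walks below drop kvm->mmu_lock under contention: flush remote TLBs if the caller has pending changes, reschedule, then resume the walk. A condensed sketch of that shape; the iterator-restart step (tdp_iter_refresh_walk() here) is an assumption, since the search only shows the kvm-related lines:

static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
					     struct tdp_iter *iter, bool flush)
{
	if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
		/* Make pending zaps visible before anyone else runs. */
		if (flush)
			kvm_flush_remote_tlbs(kvm);

		cond_resched_lock(&kvm->mmu_lock);

		/* Assumed: re-seed the iterator at the gfn we stopped at. */
		tdp_iter_refresh_walk(iter);

		return true;
	}

	return false;
}
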
460 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, in zap_gfn_range() argument
467 tdp_mmu_iter_cond_resched(kvm, &iter, flush)) { in zap_gfn_range()
485 tdp_mmu_set_spte(kvm, &iter, 0); in zap_gfn_range()
498 bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end, in __kvm_tdp_mmu_zap_gfn_range() argument
504 for_each_tdp_mmu_root_yield_safe(kvm, root) in __kvm_tdp_mmu_zap_gfn_range()
505 flush = zap_gfn_range(kvm, root, start, end, can_yield, flush); in __kvm_tdp_mmu_zap_gfn_range()
510 void kvm_tdp_mmu_zap_all(struct kvm *kvm) in kvm_tdp_mmu_zap_all() argument
515 flush = kvm_tdp_mmu_zap_gfn_range(kvm, 0, max_gfn); in kvm_tdp_mmu_zap_all()
517 kvm_flush_remote_tlbs(kvm); in kvm_tdp_mmu_zap_all()
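
__kvm_tdp_mmu_zap_gfn_range() and kvm_tdp_mmu_zap_all() show the top-level zap plumbing: walk every root with the yield-safe iterator, accumulate whether anything was zapped, and flush once at the end. A sketch of the two callers; the max_gfn computation from the host's physical address width is an assumption, as is the kvm_tdp_mmu_zap_gfn_range() wrapper that passes can_yield = true:

bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
				 bool can_yield)
{
	struct kvm_mmu_page *root;
	bool flush = false;

	for_each_tdp_mmu_root_yield_safe(kvm, root)
		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);

	return flush;
}

void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
	/* Assumed: cover every gfn the guest could possibly map. */
	gfn_t max_gfn = 1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT);
	bool flush;

	flush = kvm_tdp_mmu_zap_gfn_range(kvm, 0, max_gfn);
	if (flush)
		kvm_flush_remote_tlbs(kvm);
}
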
545 tdp_mmu_set_spte(vcpu->kvm, iter, new_spte); in tdp_mmu_map_handle_target_level()
593 if (WARN_ON(!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))) in kvm_tdp_mmu_map()
615 tdp_mmu_set_spte(vcpu->kvm, &iter, 0); in kvm_tdp_mmu_map()
617 kvm_flush_remote_tlbs_with_address(vcpu->kvm, iter.gfn, in kvm_tdp_mmu_map()
630 list_add(&sp->link, &vcpu->kvm->arch.tdp_mmu_pages); in kvm_tdp_mmu_map()
638 account_huge_nx_page(vcpu->kvm, sp); in kvm_tdp_mmu_map()
640 tdp_mmu_set_spte(vcpu->kvm, &iter, new_spte); in kvm_tdp_mmu_map()
653 static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start, in kvm_tdp_mmu_handle_hva_range() argument
655 int (*handler)(struct kvm *kvm, struct kvm_memory_slot *slot, in kvm_tdp_mmu_handle_hva_range() argument
665 for_each_tdp_mmu_root_yield_safe(kvm, root) { in kvm_tdp_mmu_handle_hva_range()
667 slots = __kvm_memslots(kvm, as_id); in kvm_tdp_mmu_handle_hva_range()
684 ret |= handler(kvm, memslot, root, gfn_start, in kvm_tdp_mmu_handle_hva_range()
692 static int zap_gfn_range_hva_wrapper(struct kvm *kvm, in zap_gfn_range_hva_wrapper() argument
697 return zap_gfn_range(kvm, root, start, end, false, false); in zap_gfn_range_hva_wrapper()
700 int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start, in kvm_tdp_mmu_zap_hva_range() argument
703 return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0, in kvm_tdp_mmu_zap_hva_range()
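
kvm_tdp_mmu_handle_hva_range() is the generic dispatcher behind the MMU-notifier entry points that follow: it walks every root, translates the host VA range into a gfn range for each memslot of that root's address space, and ORs together the handler results. The zap pair above is its simplest client and shows the callback plumbing; a sketch, with the final parameter name an assumption:

static int zap_gfn_range_hva_wrapper(struct kvm *kvm,
				     struct kvm_memory_slot *slot,
				     struct kvm_mmu_page *root, gfn_t start,
				     gfn_t end, unsigned long unused)
{
	/* can_yield = false, flush = false: the notifier caller flushes. */
	return zap_gfn_range(kvm, root, start, end, false, false);
}

int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
			      unsigned long end)
{
	return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0,
					    zap_gfn_range_hva_wrapper);
}
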
711 static int age_gfn_range(struct kvm *kvm, struct kvm_memory_slot *slot, in age_gfn_range() argument
744 tdp_mmu_set_spte_no_acc_track(kvm, &iter, new_spte); in age_gfn_range()
751 int kvm_tdp_mmu_age_hva_range(struct kvm *kvm, unsigned long start, in kvm_tdp_mmu_age_hva_range() argument
754 return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0, in kvm_tdp_mmu_age_hva_range()
758 static int test_age_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, in test_age_gfn() argument
771 int kvm_tdp_mmu_test_age_hva(struct kvm *kvm, unsigned long hva) in kvm_tdp_mmu_test_age_hva() argument
773 return kvm_tdp_mmu_handle_hva_range(kvm, hva, hva + 1, 0, in kvm_tdp_mmu_test_age_hva()
783 static int set_tdp_spte(struct kvm *kvm, struct kvm_memory_slot *slot, in set_tdp_spte() argument
804 tdp_mmu_set_spte(kvm, &iter, 0); in set_tdp_spte()
806 kvm_flush_remote_tlbs_with_address(kvm, iter.gfn, 1); in set_tdp_spte()
812 tdp_mmu_set_spte(kvm, &iter, new_spte); in set_tdp_spte()
819 kvm_flush_remote_tlbs_with_address(kvm, gfn, 1); in set_tdp_spte()
824 int kvm_tdp_mmu_set_spte_hva(struct kvm *kvm, unsigned long address, in kvm_tdp_mmu_set_spte_hva() argument
827 return kvm_tdp_mmu_handle_hva_range(kvm, address, address + 1, in kvm_tdp_mmu_set_spte_hva()
837 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, in wrprot_gfn_range() argument
848 if (tdp_mmu_iter_cond_resched(kvm, &iter, false)) in wrprot_gfn_range()
857 tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); in wrprot_gfn_range()
868 bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot, in kvm_tdp_mmu_wrprot_slot() argument
875 for_each_tdp_mmu_root_yield_safe(kvm, root) { in kvm_tdp_mmu_wrprot_slot()
880 spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_wrprot_slot()
894 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, in clear_dirty_gfn_range() argument
902 if (tdp_mmu_iter_cond_resched(kvm, &iter, false)) in clear_dirty_gfn_range()
920 tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); in clear_dirty_gfn_range()
933 bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot) in kvm_tdp_mmu_clear_dirty_slot() argument
939 for_each_tdp_mmu_root_yield_safe(kvm, root) { in kvm_tdp_mmu_clear_dirty_slot()
944 spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_clear_dirty_slot()
958 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, in clear_dirty_pt_masked() argument
985 tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); in clear_dirty_pt_masked()
998 void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, in kvm_tdp_mmu_clear_dirty_pt_masked() argument
1006 lockdep_assert_held(&kvm->mmu_lock); in kvm_tdp_mmu_clear_dirty_pt_masked()
1007 for_each_tdp_mmu_root(kvm, root) { in kvm_tdp_mmu_clear_dirty_pt_masked()
1012 clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot); in kvm_tdp_mmu_clear_dirty_pt_masked()
1021 static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, in set_dirty_gfn_range() argument
1029 if (tdp_mmu_iter_cond_resched(kvm, &iter, false)) in set_dirty_gfn_range()
1037 tdp_mmu_set_spte(kvm, &iter, new_spte); in set_dirty_gfn_range()
1049 bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot) in kvm_tdp_mmu_slot_set_dirty() argument
1055 for_each_tdp_mmu_root_yield_safe(kvm, root) { in kvm_tdp_mmu_slot_set_dirty()
1060 spte_set |= set_dirty_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_slot_set_dirty()
1070 static void zap_collapsible_spte_range(struct kvm *kvm, in zap_collapsible_spte_range() argument
1079 if (tdp_mmu_iter_cond_resched(kvm, &iter, spte_set)) { in zap_collapsible_spte_range()
1094 tdp_mmu_set_spte(kvm, &iter, 0); in zap_collapsible_spte_range()
1100 kvm_flush_remote_tlbs(kvm); in zap_collapsible_spte_range()
1107 void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm, in kvm_tdp_mmu_zap_collapsible_sptes() argument
1113 for_each_tdp_mmu_root_yield_safe(kvm, root) { in kvm_tdp_mmu_zap_collapsible_sptes()
1118 zap_collapsible_spte_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_zap_collapsible_sptes()
1128 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, in write_protect_gfn() argument
1142 tdp_mmu_set_spte(kvm, &iter, new_spte); in write_protect_gfn()
1154 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, in kvm_tdp_mmu_write_protect_gfn() argument
1161 lockdep_assert_held(&kvm->mmu_lock); in kvm_tdp_mmu_write_protect_gfn()
1162 for_each_tdp_mmu_root(kvm, root) { in kvm_tdp_mmu_write_protect_gfn()
1167 spte_set |= write_protect_gfn(kvm, root, gfn); in kvm_tdp_mmu_write_protect_gfn()
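
The final cluster (write_protect_gfn() and kvm_tdp_mmu_write_protect_gfn()) repeats the per-root dispatch pattern used by the slot-wide operations above: hold kvm->mmu_lock, skip roots whose address space does not match the memslot, and OR together whether any SPTE changed. A sketch of the exported wrapper; the as_id filtering against slot->as_id is an assumption reconstructed from the other slot-based walkers:

bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn)
{
	struct kvm_mmu_page *root;
	int root_as_id;
	bool spte_set = false;

	lockdep_assert_held(&kvm->mmu_lock);
	for_each_tdp_mmu_root(kvm, root) {
		root_as_id = kvm_mmu_page_as_id(root);
		if (root_as_id != slot->as_id)
			continue;	/* root belongs to another address space */

		spte_set |= write_protect_gfn(kvm, root, gfn);
	}

	return spte_set;
}
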