Lines matching references to root, apparently from arch/x86/kvm/mmu/tdp_mmu.c (KVM's TDP MMU). Each entry gives the file line number, the matching source line, and the enclosing function; a trailing "argument" or "local" marks the line where root is declared as a parameter or a local variable.
45 static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root) in tdp_mmu_put_root() argument
47 if (kvm_mmu_put_root(kvm, root)) in tdp_mmu_put_root()
48 kvm_tdp_mmu_free_root(kvm, root); in tdp_mmu_put_root()
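Fragments 45-48 form the reference-drop path: the root's refcount is decremented, and dropping the last reference triggers teardown. A minimal sketch of how the listed lines likely fit together (context not shown by the listing is assumed):

static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
        /*
         * kvm_mmu_put_root() drops root->root_count and returns true once
         * the count hits zero, i.e. the root has no remaining users.
         */
        if (kvm_mmu_put_root(kvm, root))
                kvm_tdp_mmu_free_root(kvm, root);
}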
52 struct kvm_mmu_page *root) in tdp_mmu_next_root_valid() argument
56 if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link)) in tdp_mmu_next_root_valid()
59 kvm_mmu_get_root(kvm, root); in tdp_mmu_next_root_valid()
65 struct kvm_mmu_page *root) in tdp_mmu_next_root() argument
69 next_root = list_next_entry(root, link); in tdp_mmu_next_root()
70 tdp_mmu_put_root(kvm, root); in tdp_mmu_next_root()
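Fragments 52-70 back the root-list iteration: tdp_mmu_next_root_valid() terminates the walk at the list head and otherwise pins the candidate root with a reference, while tdp_mmu_next_root() reads the next link before dropping the current root's reference. A sketch reconstructed from the fragments:

static bool tdp_mmu_next_root_valid(struct kvm *kvm,
                                    struct kvm_mmu_page *root)
{
        /* Walked past the last root: stop. */
        if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
                return false;

        /* Pin this root so it survives while the loop body runs. */
        kvm_mmu_get_root(kvm, root);
        return true;
}

static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
                                              struct kvm_mmu_page *root)
{
        struct kvm_mmu_page *next_root;

        /* Fetch the next entry before the put can free the current root. */
        next_root = list_next_entry(root, link);
        tdp_mmu_put_root(kvm, root);

        return next_root;
}

This get-next-then-put ordering is what makes the for_each_tdp_mmu_root_yield_safe() walks seen later in the listing safe against a root being freed mid-iteration.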
105 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
108 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root) in kvm_tdp_mmu_free_root() argument
114 WARN_ON(root->root_count); in kvm_tdp_mmu_free_root()
115 WARN_ON(!root->tdp_mmu_page); in kvm_tdp_mmu_free_root()
117 list_del(&root->link); in kvm_tdp_mmu_free_root()
119 zap_gfn_range(kvm, root, 0, max_gfn, false, false); in kvm_tdp_mmu_free_root()
121 free_page((unsigned long)root->spt); in kvm_tdp_mmu_free_root()
122 kmem_cache_free(mmu_page_header_cache, root); in kvm_tdp_mmu_free_root()
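Fragments 108-122 are the teardown path, sketched below; the max_gfn computation and shadow_phys_bits are assumptions (the listing only shows max_gfn being passed to zap_gfn_range()):

void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
        /* Assumed: span every gfn the root could possibly map. */
        gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);

        /* Only unreferenced, TDP-MMU-owned roots may be freed. */
        WARN_ON(root->root_count);
        WARN_ON(!root->tdp_mmu_page);

        list_del(&root->link);

        /* Tear down the entire paging structure; no yielding, no flush. */
        zap_gfn_range(kvm, root, 0, max_gfn, false, false);

        free_page((unsigned long)root->spt);
        kmem_cache_free(mmu_page_header_cache, root);
}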
159 struct kvm_mmu_page *root; in get_tdp_mmu_vcpu_root() local
166 for_each_tdp_mmu_root(kvm, root) { in get_tdp_mmu_vcpu_root()
167 if (root->role.word == role.word) { in get_tdp_mmu_vcpu_root()
168 kvm_mmu_get_root(kvm, root); in get_tdp_mmu_vcpu_root()
170 return root; in get_tdp_mmu_vcpu_root()
174 root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level); in get_tdp_mmu_vcpu_root()
175 root->root_count = 1; in get_tdp_mmu_vcpu_root()
177 list_add(&root->link, &kvm->arch.tdp_mmu_roots); in get_tdp_mmu_vcpu_root()
181 return root; in get_tdp_mmu_vcpu_root()
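Fragments 159-181 implement reuse-or-allocate for vCPU roots: an existing root with an identical page role is shared via an extra reference instead of duplicated. Sketch; page_role_for_level() and the mmu_lock critical section are assumed from surrounding context:

static struct kvm_mmu_page *get_tdp_mmu_vcpu_root(struct kvm_vcpu *vcpu)
{
        union kvm_mmu_page_role role;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_page *root;

        role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);

        /* Share an existing root whose role matches exactly. */
        for_each_tdp_mmu_root(kvm, root) {
                if (root->role.word == role.word) {
                        kvm_mmu_get_root(kvm, root);
                        return root;
                }
        }

        root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
        root->root_count = 1;

        list_add(&root->link, &kvm->arch.tdp_mmu_roots);

        return root;
}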
186 struct kvm_mmu_page *root; in kvm_tdp_mmu_get_vcpu_root_hpa() local
188 root = get_tdp_mmu_vcpu_root(vcpu); in kvm_tdp_mmu_get_vcpu_root_hpa()
189 if (!root) in kvm_tdp_mmu_get_vcpu_root_hpa()
192 return __pa(root->spt); in kvm_tdp_mmu_get_vcpu_root_hpa()
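Fragments 186-192 then hand the hardware the root's physical address; INVALID_PAGE as the failure value is an assumption:

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
{
        struct kvm_mmu_page *root;

        root = get_tdp_mmu_vcpu_root(vcpu);
        if (!root)
                return INVALID_PAGE;

        /* root->spt is the top-level page table page itself. */
        return __pa(root->spt);
}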
357 struct kvm_mmu_page *root = sptep_to_sp(root_pt); in __tdp_mmu_set_spte() local
358 int as_id = kvm_mmu_page_as_id(root); in __tdp_mmu_set_spte()
460 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, in zap_gfn_range() argument
465 tdp_root_for_each_pte(iter, root, start, end) { in zap_gfn_range()
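Fragments 460-465 show only the signature and iterator of the central zap worker; a simplified sketch of its likely shape follows. The yield helper tdp_mmu_iter_cond_resched() and the handling of large SPTEs that straddle the range (omitted here) are assumptions:

static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                          gfn_t start, gfn_t end, bool can_yield, bool flush)
{
        struct tdp_iter iter;

        tdp_root_for_each_pte(iter, root, start, end) {
                /* Periodically drop mmu_lock if the caller allows it. */
                if (can_yield &&
                    tdp_mmu_iter_cond_resched(kvm, &iter, flush)) {
                        flush = false;
                        continue;
                }

                if (!is_shadow_present_pte(iter.old_spte))
                        continue;

                /* Clearing the SPTE zaps the mapping and any subtree. */
                tdp_mmu_set_spte(kvm, &iter, 0);
                flush = true;
        }

        return flush;
}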
501 struct kvm_mmu_page *root; in __kvm_tdp_mmu_zap_gfn_range() local
504 for_each_tdp_mmu_root_yield_safe(kvm, root) in __kvm_tdp_mmu_zap_gfn_range()
505 flush = zap_gfn_range(kvm, root, start, end, can_yield, flush); in __kvm_tdp_mmu_zap_gfn_range()
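Fragments 501-505: the flush flag threads through every per-root call so the caller can issue a single remote TLB flush at the end rather than one per root. Sketch, with the signature assumed:

bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
                                 bool can_yield)
{
        struct kvm_mmu_page *root;
        bool flush = false;

        for_each_tdp_mmu_root_yield_safe(kvm, root)
                flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);

        return flush;
}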
656 struct kvm_mmu_page *root, gfn_t start, in kvm_tdp_mmu_handle_hva_range() argument
661 struct kvm_mmu_page *root; in kvm_tdp_mmu_handle_hva_range() local
665 for_each_tdp_mmu_root_yield_safe(kvm, root) { in kvm_tdp_mmu_handle_hva_range()
666 as_id = kvm_mmu_page_as_id(root); in kvm_tdp_mmu_handle_hva_range()
684 ret |= handler(kvm, memslot, root, gfn_start, in kvm_tdp_mmu_handle_hva_range()
694 struct kvm_mmu_page *root, gfn_t start, in zap_gfn_range_hva_wrapper() argument
697 return zap_gfn_range(kvm, root, start, end, false, false); in zap_gfn_range_hva_wrapper()
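Fragments 656-697 are the MMU-notifier plumbing: kvm_tdp_mmu_handle_hva_range() walks every root, maps the HVA range to GFNs in each overlapping memslot of the root's address space, and calls a handler with a common per-root signature. zap_gfn_range_hva_wrapper() merely adapts zap_gfn_range() to that signature, with yielding and deferred flushing disabled (the two false arguments). A sketch of the wrapper:

static int zap_gfn_range_hva_wrapper(struct kvm *kvm,
                                     struct kvm_memory_slot *slot,
                                     struct kvm_mmu_page *root, gfn_t start,
                                     gfn_t end, unsigned long unused)
{
        return zap_gfn_range(kvm, root, start, end, false, false);
}

age_gfn_range(), test_age_gfn(), and set_tdp_spte() in the fragments below plug into the same handler slot.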
712 struct kvm_mmu_page *root, gfn_t start, gfn_t end, in age_gfn_range() argument
719 tdp_root_for_each_leaf_pte(iter, root, start, end) { in age_gfn_range()
759 struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused, in test_age_gfn() argument
764 tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) in test_age_gfn()
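Fragments 712-764: age_gfn_range() clears the accessed state on leaf SPTEs so future use is detectable, while test_age_gfn() only reports whether any leaf SPTE in the range is accessed. A simplified sketch of the aging side; the SPTE helpers named here are assumed from the same source tree, and the real code also preserves the dirty status of access-tracked pages:

static int age_gfn_range(struct kvm *kvm, struct kvm_memory_slot *slot,
                         struct kvm_mmu_page *root, gfn_t start, gfn_t end,
                         unsigned long unused)
{
        struct tdp_iter iter;
        u64 new_spte;
        int young = 0;

        tdp_root_for_each_leaf_pte(iter, root, start, end) {
                if (!is_accessed_spte(iter.old_spte))
                        continue;

                new_spte = iter.old_spte;
                if (spte_ad_enabled(new_spte))
                        /* Hardware A/D bits: clear the accessed bit. */
                        new_spte &= ~shadow_accessed_mask;
                else
                        /* No A/D bits: fall back to access tracking. */
                        new_spte = mark_spte_for_access_track(new_spte);

                tdp_mmu_set_spte_no_acc_track(kvm, &iter, new_spte);
                young = 1;
        }

        return young;
}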
784 struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused, in set_tdp_spte() argument
797 tdp_root_for_each_pte(iter, root, gfn, gfn + 1) { in set_tdp_spte()
837 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, in wrprot_gfn_range() argument
846 for_each_tdp_pte_min_level(iter, root->spt, root->role.level, in wrprot_gfn_range()
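Fragments 837-846: the write-protection worker starts at root->spt and descends no deeper than min_level, so callers can choose the granularity at which mappings are split for dirty logging. A sketch, simplified to clearing only the hardware writable bit and omitting the yield check:

static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                             gfn_t start, gfn_t end, int min_level)
{
        struct tdp_iter iter;
        u64 new_spte;
        bool spte_set = false;

        for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
                                   min_level, start, end) {
                if (!is_shadow_present_pte(iter.old_spte) ||
                    !is_last_spte(iter.old_spte, iter.level))
                        continue;

                new_spte = iter.old_spte & ~PT_WRITABLE_MASK;

                tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
                spte_set = true;
        }

        return spte_set;
}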
871 struct kvm_mmu_page *root; in kvm_tdp_mmu_wrprot_slot() local
875 for_each_tdp_mmu_root_yield_safe(kvm, root) { in kvm_tdp_mmu_wrprot_slot()
876 root_as_id = kvm_mmu_page_as_id(root); in kvm_tdp_mmu_wrprot_slot()
880 spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_wrprot_slot()
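Fragments 871-880 show the recurring per-slot dispatch: visit every root, skip roots belonging to a different address space than the slot, and OR the per-root results together. Sketch:

bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
                             int min_level)
{
        struct kvm_mmu_page *root;
        int root_as_id;
        bool spte_set = false;

        for_each_tdp_mmu_root_yield_safe(kvm, root) {
                /* SMM and non-SMM roots live in separate address spaces. */
                root_as_id = kvm_mmu_page_as_id(root);
                if (root_as_id != slot->as_id)
                        continue;

                spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
                                             slot->base_gfn + slot->npages,
                                             min_level);
        }

        return spte_set;
}

kvm_tdp_mmu_clear_dirty_slot(), kvm_tdp_mmu_slot_set_dirty(), and kvm_tdp_mmu_zap_collapsible_sptes() below repeat this same outer loop around their own workers.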
894 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, in clear_dirty_gfn_range() argument
901 tdp_root_for_each_leaf_pte(iter, root, start, end) { in clear_dirty_gfn_range()
935 struct kvm_mmu_page *root; in kvm_tdp_mmu_clear_dirty_slot() local
939 for_each_tdp_mmu_root_yield_safe(kvm, root) { in kvm_tdp_mmu_clear_dirty_slot()
940 root_as_id = kvm_mmu_page_as_id(root); in kvm_tdp_mmu_clear_dirty_slot()
944 spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_clear_dirty_slot()
958 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, in clear_dirty_pt_masked() argument
964 tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask), in clear_dirty_pt_masked()
1003 struct kvm_mmu_page *root; in kvm_tdp_mmu_clear_dirty_pt_masked() local
1007 for_each_tdp_mmu_root(kvm, root) { in kvm_tdp_mmu_clear_dirty_pt_masked()
1008 root_as_id = kvm_mmu_page_as_id(root); in kvm_tdp_mmu_clear_dirty_pt_masked()
1012 clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot); in kvm_tdp_mmu_clear_dirty_pt_masked()
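Fragments 958-1012: the masked variant touches only the 4KiB GFNs selected by one word of the caller's dirty bitmap; __ffs(mask) starts the walk at the first set bit and the loop ends once the mask is exhausted. Sketch, assuming the usual split between clearing the D-bit and write-protecting (hardware dirty logging vs. fault-based tracking):

static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
                                  gfn_t gfn, unsigned long mask, bool wrprot)
{
        struct tdp_iter iter;
        u64 new_spte;

        tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
                                   gfn + BITS_PER_LONG) {
                if (!mask)
                        break;

                /* Only 4KiB leaves selected by the bitmap are touched. */
                if (iter.level > PG_LEVEL_4K ||
                    !(mask & (1UL << (iter.gfn - gfn))))
                        continue;

                mask &= ~(1UL << (iter.gfn - gfn));

                if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
                        if (!is_writable_pte(iter.old_spte))
                                continue;
                        new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
                } else {
                        if (!(iter.old_spte & shadow_dirty_mask))
                                continue;
                        new_spte = iter.old_spte & ~shadow_dirty_mask;
                }

                tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
        }
}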
1021 static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, in set_dirty_gfn_range() argument
1028 tdp_root_for_each_pte(iter, root, start, end) { in set_dirty_gfn_range()
1051 struct kvm_mmu_page *root; in kvm_tdp_mmu_slot_set_dirty() local
1055 for_each_tdp_mmu_root_yield_safe(kvm, root) { in kvm_tdp_mmu_slot_set_dirty()
1056 root_as_id = kvm_mmu_page_as_id(root); in kvm_tdp_mmu_slot_set_dirty()
1060 spte_set |= set_dirty_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_slot_set_dirty()
1071 struct kvm_mmu_page *root, in zap_collapsible_spte_range() argument
1078 tdp_root_for_each_pte(iter, root, start, end) { in zap_collapsible_spte_range()
1110 struct kvm_mmu_page *root; in kvm_tdp_mmu_zap_collapsible_sptes() local
1113 for_each_tdp_mmu_root_yield_safe(kvm, root) { in kvm_tdp_mmu_zap_collapsible_sptes()
1114 root_as_id = kvm_mmu_page_as_id(root); in kvm_tdp_mmu_zap_collapsible_sptes()
1118 zap_collapsible_spte_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_zap_collapsible_sptes()
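Fragments 1071-1118: once dirty logging ends, small leaf SPTEs that fragment a would-be huge mapping are zapped so the next fault can rebuild them at a larger level. A heavily simplified sketch; the actual eligibility test for a PFN is an assumption here:

static void zap_collapsible_spte_range(struct kvm *kvm,
                                       struct kvm_mmu_page *root,
                                       gfn_t start, gfn_t end)
{
        struct tdp_iter iter;
        kvm_pfn_t pfn;

        tdp_root_for_each_pte(iter, root, start, end) {
                if (!is_shadow_present_pte(iter.old_spte) ||
                    !is_last_spte(iter.old_spte, iter.level))
                        continue;

                /* Assumed check: skip PFNs that cannot back a huge page. */
                pfn = spte_to_pfn(iter.old_spte);
                if (kvm_is_reserved_pfn(pfn))
                        continue;

                /* Zap; the fault path reinstates a huge mapping later. */
                tdp_mmu_set_spte(kvm, &iter, 0);
        }
}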
1128 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, in write_protect_gfn() argument
1135 tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) { in write_protect_gfn()
1157 struct kvm_mmu_page *root; in kvm_tdp_mmu_write_protect_gfn() local
1162 for_each_tdp_mmu_root(kvm, root) { in kvm_tdp_mmu_write_protect_gfn()
1163 root_as_id = kvm_mmu_page_as_id(root); in kvm_tdp_mmu_write_protect_gfn()
1167 spte_set |= write_protect_gfn(kvm, root, gfn); in kvm_tdp_mmu_write_protect_gfn()
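Fragments 1128-1167 close the listing with the single-GFN write protector, used to re-arm write tracking for one page. Sketch; the real code likely also clears a software MMU-writable bit alongside the hardware one:

static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
                              gfn_t gfn)
{
        struct tdp_iter iter;
        u64 new_spte;
        bool spte_set = false;

        tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
                if (!is_writable_pte(iter.old_spte))
                        break;

                new_spte = iter.old_spte & ~PT_WRITABLE_MASK;

                tdp_mmu_set_spte(kvm, &iter, new_spte);
                spte_set = true;
        }

        return spte_set;
}

Note that the caller at line 1162 uses the plain for_each_tdp_mmu_root() walk, i.e. this path does not yield the MMU lock.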