Lines Matching refs:iter

352 static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,  in __tdp_mmu_set_spte()  argument
356 u64 *root_pt = tdp_iter_root_pt(iter); in __tdp_mmu_set_spte()
360 WRITE_ONCE(*iter->sptep, new_spte); in __tdp_mmu_set_spte()
362 __handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte, in __tdp_mmu_set_spte()
363 iter->level); in __tdp_mmu_set_spte()
365 handle_changed_spte_acc_track(iter->old_spte, new_spte, in __tdp_mmu_set_spte()
366 iter->level); in __tdp_mmu_set_spte()
368 handle_changed_spte_dirty_log(kvm, as_id, iter->gfn, in __tdp_mmu_set_spte()
369 iter->old_spte, new_spte, in __tdp_mmu_set_spte()
370 iter->level); in __tdp_mmu_set_spte()
373 static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, in tdp_mmu_set_spte() argument
376 __tdp_mmu_set_spte(kvm, iter, new_spte, true, true); in tdp_mmu_set_spte()
380 struct tdp_iter *iter, in tdp_mmu_set_spte_no_acc_track() argument
383 __tdp_mmu_set_spte(kvm, iter, new_spte, false, true); in tdp_mmu_set_spte_no_acc_track()
387 struct tdp_iter *iter, in tdp_mmu_set_spte_no_dirty_log() argument
390 __tdp_mmu_set_spte(kvm, iter, new_spte, true, false); in tdp_mmu_set_spte_no_dirty_log()
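
The three setters above are thin wrappers that differ only in the accessed-tracking and dirty-logging flags they pass to __tdp_mmu_set_spte(). As a reading aid, here is a minimal stand-alone C sketch of that dispatch pattern; the struct, handlers and values are stand-ins, not the real KVM definitions, and the real worker also unconditionally calls __handle_changed_spte(), which is omitted here.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in iterator; the real struct tdp_iter carries much more state. */
	struct fake_iter {
		uint64_t gfn;
		uint64_t old_spte;
		int level;
	};

	/* Placeholder side-effect handlers standing in for the KVM helpers. */
	static void handle_acc_track(uint64_t old_spte, uint64_t new_spte, int level)
	{
		printf("acc-track update at level %d\n", level);
	}

	static void handle_dirty_log(uint64_t gfn, uint64_t old_spte, uint64_t new_spte)
	{
		printf("dirty-log update for gfn %#llx\n", (unsigned long long)gfn);
	}

	/* Worker: records the new value, optionally running the two handlers. */
	static void set_spte_worker(struct fake_iter *iter, uint64_t new_spte,
				    bool record_acc_track, bool record_dirty_log)
	{
		if (record_acc_track)
			handle_acc_track(iter->old_spte, new_spte, iter->level);
		if (record_dirty_log)
			handle_dirty_log(iter->gfn, iter->old_spte, new_spte);
		iter->old_spte = new_spte;	/* "write" the new SPTE value */
	}

	/* Thin wrappers mirroring the (true, true), (false, true) and
	 * (true, false) call sites visible in the listing. */
	static void set_spte(struct fake_iter *iter, uint64_t v)
	{
		set_spte_worker(iter, v, true, true);
	}

	static void set_spte_no_acc_track(struct fake_iter *iter, uint64_t v)
	{
		set_spte_worker(iter, v, false, true);
	}

	static void set_spte_no_dirty_log(struct fake_iter *iter, uint64_t v)
	{
		set_spte_worker(iter, v, true, false);
	}

	int main(void)
	{
		struct fake_iter iter = { .gfn = 0x1000, .old_spte = 0x3, .level = 1 };

		set_spte(&iter, 0x7);
		set_spte_no_acc_track(&iter, 0xf);
		set_spte_no_dirty_log(&iter, 0);
		return 0;
	}
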
423 struct tdp_iter *iter, bool flush) in tdp_mmu_iter_cond_resched() argument
426 if (iter->next_last_level_gfn == iter->yielded_gfn) in tdp_mmu_iter_cond_resched()
435 WARN_ON(iter->gfn > iter->next_last_level_gfn); in tdp_mmu_iter_cond_resched()
437 tdp_iter_start(iter, iter->pt_path[iter->root_level - 1], in tdp_mmu_iter_cond_resched()
438 iter->root_level, iter->min_level, in tdp_mmu_iter_cond_resched()
439 iter->next_last_level_gfn); in tdp_mmu_iter_cond_resched()
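
tdp_mmu_iter_cond_resched() only yields when the walk has advanced past the point recorded at the previous yield, then restarts the iterator at next_last_level_gfn. A minimal stand-alone sketch of that guard follows; the walk state is a simplified stand-in, and the lock dropping and cond_resched() of the real function are left as a comment.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct walk_state {
		uint64_t next_gfn;	/* next last-level gfn the walk will visit */
		uint64_t yielded_gfn;	/* next_gfn recorded at the last yield */
	};

	static bool maybe_yield(struct walk_state *ws)
	{
		/* No forward progress since the last yield: refuse to yield again. */
		if (ws->next_gfn == ws->yielded_gfn)
			return false;

		/* The real code would drop the MMU lock and cond_resched() here,
		 * then restart the iterator at next_gfn. */
		ws->yielded_gfn = ws->next_gfn;
		printf("yielded; walk will restart at gfn %#llx\n",
		       (unsigned long long)ws->next_gfn);
		return true;
	}

	int main(void)
	{
		struct walk_state ws = { .next_gfn = 0x200, .yielded_gfn = 0 };

		printf("%d\n", maybe_yield(&ws));	/* 1: progress made, yields */
		printf("%d\n", maybe_yield(&ws));	/* 0: no progress, won't yield */
		return 0;
	}
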
463 struct tdp_iter iter; in zap_gfn_range() local
465 tdp_root_for_each_pte(iter, root, start, end) { in zap_gfn_range()
467 tdp_mmu_iter_cond_resched(kvm, &iter, flush)) { in zap_gfn_range()
472 if (!is_shadow_present_pte(iter.old_spte)) in zap_gfn_range()
480 if ((iter.gfn < start || in zap_gfn_range()
481 iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) && in zap_gfn_range()
482 !is_last_spte(iter.old_spte, iter.level)) in zap_gfn_range()
485 tdp_mmu_set_spte(kvm, &iter, 0); in zap_gfn_range()
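
In zap_gfn_range(), non-present SPTEs are skipped, and a non-leaf SPTE is only zapped when the GFN range it maps lies entirely inside [start, end). A small stand-alone illustration of that containment test; pages_per_level() is a made-up stand-in for KVM_PAGES_PER_HPAGE().

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for KVM_PAGES_PER_HPAGE(): 4K pages per mapping at a level. */
	static uint64_t pages_per_level(int level)
	{
		uint64_t pages = 1;

		while (--level > 0)
			pages *= 512;	/* 4K -> 2M -> 1G -> ... */
		return pages;
	}

	/* True if the mapping rooted at gfn fits entirely inside [start, end). */
	static bool spte_fully_contained(uint64_t gfn, int level,
					 uint64_t start, uint64_t end)
	{
		return gfn >= start && gfn + pages_per_level(level) <= end;
	}

	int main(void)
	{
		/* A 2M mapping at gfn 0 is not fully inside [0, 256) pages... */
		printf("%d\n", spte_fully_contained(0, 2, 0, 256));
		/* ...but is fully inside [0, 512). */
		printf("%d\n", spte_fully_contained(0, 2, 0, 512));
		return 0;
	}
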
526 struct tdp_iter *iter, in tdp_mmu_map_handle_target_level() argument
534 new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL); in tdp_mmu_map_handle_target_level()
535 trace_mark_mmio_spte(iter->sptep, iter->gfn, new_spte); in tdp_mmu_map_handle_target_level()
537 make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn, in tdp_mmu_map_handle_target_level()
538 pfn, iter->old_spte, prefault, true, in tdp_mmu_map_handle_target_level()
542 if (new_spte == iter->old_spte) in tdp_mmu_map_handle_target_level()
545 tdp_mmu_set_spte(vcpu->kvm, iter, new_spte); in tdp_mmu_map_handle_target_level()
562 trace_kvm_mmu_set_spte(iter->level, iter->gfn, iter->sptep); in tdp_mmu_map_handle_target_level()
582 struct tdp_iter iter; in kvm_tdp_mmu_map() local
600 tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) { in kvm_tdp_mmu_map()
602 disallowed_hugepage_adjust(iter.old_spte, gfn, in kvm_tdp_mmu_map()
603 iter.level, &pfn, &level); in kvm_tdp_mmu_map()
605 if (iter.level == level) in kvm_tdp_mmu_map()
613 if (is_shadow_present_pte(iter.old_spte) && in kvm_tdp_mmu_map()
614 is_large_pte(iter.old_spte)) { in kvm_tdp_mmu_map()
615 tdp_mmu_set_spte(vcpu->kvm, &iter, 0); in kvm_tdp_mmu_map()
617 kvm_flush_remote_tlbs_with_address(vcpu->kvm, iter.gfn, in kvm_tdp_mmu_map()
618 KVM_PAGES_PER_HPAGE(iter.level)); in kvm_tdp_mmu_map()
625 iter.old_spte = READ_ONCE(*iter.sptep); in kvm_tdp_mmu_map()
628 if (!is_shadow_present_pte(iter.old_spte)) { in kvm_tdp_mmu_map()
629 sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level); in kvm_tdp_mmu_map()
637 if (huge_page_disallowed && req_level >= iter.level) in kvm_tdp_mmu_map()
640 tdp_mmu_set_spte(vcpu->kvm, &iter, new_spte); in kvm_tdp_mmu_map()
644 if (WARN_ON(iter.level != level)) in kvm_tdp_mmu_map()
647 ret = tdp_mmu_map_handle_target_level(vcpu, write, map_writable, &iter, in kvm_tdp_mmu_map()
715 struct tdp_iter iter; in age_gfn_range() local
719 tdp_root_for_each_leaf_pte(iter, root, start, end) { in age_gfn_range()
724 if (!is_accessed_spte(iter.old_spte)) in age_gfn_range()
727 new_spte = iter.old_spte; in age_gfn_range()
744 tdp_mmu_set_spte_no_acc_track(kvm, &iter, new_spte); in age_gfn_range()
762 struct tdp_iter iter; in test_age_gfn() local
764 tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) in test_age_gfn()
765 if (is_accessed_spte(iter.old_spte)) in test_age_gfn()
787 struct tdp_iter iter; in set_tdp_spte() local
797 tdp_root_for_each_pte(iter, root, gfn, gfn + 1) { in set_tdp_spte()
798 if (iter.level != PG_LEVEL_4K) in set_tdp_spte()
801 if (!is_shadow_present_pte(iter.old_spte)) in set_tdp_spte()
804 tdp_mmu_set_spte(kvm, &iter, 0); in set_tdp_spte()
806 kvm_flush_remote_tlbs_with_address(kvm, iter.gfn, 1); in set_tdp_spte()
810 iter.old_spte, new_pfn); in set_tdp_spte()
812 tdp_mmu_set_spte(kvm, &iter, new_spte); in set_tdp_spte()
840 struct tdp_iter iter; in wrprot_gfn_range() local
846 for_each_tdp_pte_min_level(iter, root->spt, root->role.level, in wrprot_gfn_range()
848 if (tdp_mmu_iter_cond_resched(kvm, &iter, false)) in wrprot_gfn_range()
851 if (!is_shadow_present_pte(iter.old_spte) || in wrprot_gfn_range()
852 !is_last_spte(iter.old_spte, iter.level)) in wrprot_gfn_range()
855 new_spte = iter.old_spte & ~PT_WRITABLE_MASK; in wrprot_gfn_range()
857 tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); in wrprot_gfn_range()
897 struct tdp_iter iter; in clear_dirty_gfn_range() local
901 tdp_root_for_each_leaf_pte(iter, root, start, end) { in clear_dirty_gfn_range()
902 if (tdp_mmu_iter_cond_resched(kvm, &iter, false)) in clear_dirty_gfn_range()
905 if (!is_shadow_present_pte(iter.old_spte)) in clear_dirty_gfn_range()
908 if (spte_ad_need_write_protect(iter.old_spte)) { in clear_dirty_gfn_range()
909 if (is_writable_pte(iter.old_spte)) in clear_dirty_gfn_range()
910 new_spte = iter.old_spte & ~PT_WRITABLE_MASK; in clear_dirty_gfn_range()
914 if (iter.old_spte & shadow_dirty_mask) in clear_dirty_gfn_range()
915 new_spte = iter.old_spte & ~shadow_dirty_mask; in clear_dirty_gfn_range()
920 tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); in clear_dirty_gfn_range()
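
clear_dirty_gfn_range() clears dirty state one of two ways: if the SPTE must be write-protected to track dirtying, it drops the writable bit, otherwise it drops the dirty bit. A self-contained sketch of that choice, using placeholder bit positions rather than the real x86 mask values.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define FAKE_WRITABLE_MASK	(UINT64_C(1) << 1)	/* placeholder */
	#define FAKE_DIRTY_MASK		(UINT64_C(1) << 9)	/* placeholder */

	/* Returns the new SPTE value, or the old one if nothing needs clearing. */
	static uint64_t clear_dirty_bit(uint64_t spte, bool need_write_protect)
	{
		if (need_write_protect) {
			if (spte & FAKE_WRITABLE_MASK)
				return spte & ~FAKE_WRITABLE_MASK;
		} else {
			if (spte & FAKE_DIRTY_MASK)
				return spte & ~FAKE_DIRTY_MASK;
		}
		return spte;
	}

	int main(void)
	{
		uint64_t spte = FAKE_WRITABLE_MASK | FAKE_DIRTY_MASK;

		printf("%#llx\n", (unsigned long long)clear_dirty_bit(spte, true));
		printf("%#llx\n", (unsigned long long)clear_dirty_bit(spte, false));
		return 0;
	}
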
961 struct tdp_iter iter; in clear_dirty_pt_masked() local
964 tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask), in clear_dirty_pt_masked()
969 if (iter.level > PG_LEVEL_4K || in clear_dirty_pt_masked()
970 !(mask & (1UL << (iter.gfn - gfn)))) in clear_dirty_pt_masked()
973 if (wrprot || spte_ad_need_write_protect(iter.old_spte)) { in clear_dirty_pt_masked()
974 if (is_writable_pte(iter.old_spte)) in clear_dirty_pt_masked()
975 new_spte = iter.old_spte & ~PT_WRITABLE_MASK; in clear_dirty_pt_masked()
979 if (iter.old_spte & shadow_dirty_mask) in clear_dirty_pt_masked()
980 new_spte = iter.old_spte & ~shadow_dirty_mask; in clear_dirty_pt_masked()
985 tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); in clear_dirty_pt_masked()
987 mask &= ~(1UL << (iter.gfn - gfn)); in clear_dirty_pt_masked()
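
clear_dirty_pt_masked() visits only the 4K pages selected by a bitmask relative to a base GFN and clears each bit once the corresponding SPTE has been handled. A stand-alone sketch of that bookkeeping, with handle_page() as a hypothetical placeholder for the SPTE update done by the real function.

	#include <stdint.h>
	#include <stdio.h>

	/* Placeholder for the per-page SPTE update. */
	static void handle_page(uint64_t gfn)
	{
		printf("clearing dirty state for gfn %#llx\n", (unsigned long long)gfn);
	}

	/* Each set bit in 'mask' selects the 4K page at base_gfn + bit. */
	static void clear_dirty_masked(uint64_t base_gfn, uint64_t mask)
	{
		for (unsigned int bit = 0; mask && bit < 64; bit++) {
			if (!(mask & (UINT64_C(1) << bit)))
				continue;
			handle_page(base_gfn + bit);
			mask &= ~(UINT64_C(1) << bit);	/* done with this page */
		}
	}

	int main(void)
	{
		clear_dirty_masked(0x1000, 0x5);	/* pages 0x1000 and 0x1002 */
		return 0;
	}
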
1024 struct tdp_iter iter; in set_dirty_gfn_range() local
1028 tdp_root_for_each_pte(iter, root, start, end) { in set_dirty_gfn_range()
1029 if (tdp_mmu_iter_cond_resched(kvm, &iter, false)) in set_dirty_gfn_range()
1032 if (!is_shadow_present_pte(iter.old_spte)) in set_dirty_gfn_range()
1035 new_spte = iter.old_spte | shadow_dirty_mask; in set_dirty_gfn_range()
1037 tdp_mmu_set_spte(kvm, &iter, new_spte); in set_dirty_gfn_range()
1074 struct tdp_iter iter; in zap_collapsible_spte_range() local
1078 tdp_root_for_each_pte(iter, root, start, end) { in zap_collapsible_spte_range()
1079 if (tdp_mmu_iter_cond_resched(kvm, &iter, spte_set)) { in zap_collapsible_spte_range()
1084 if (!is_shadow_present_pte(iter.old_spte) || in zap_collapsible_spte_range()
1085 !is_last_spte(iter.old_spte, iter.level)) in zap_collapsible_spte_range()
1088 pfn = spte_to_pfn(iter.old_spte); in zap_collapsible_spte_range()
1094 tdp_mmu_set_spte(kvm, &iter, 0); in zap_collapsible_spte_range()
1131 struct tdp_iter iter; in write_protect_gfn() local
1135 tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) { in write_protect_gfn()
1136 new_spte = iter.old_spte & in write_protect_gfn()
1139 if (new_spte == iter.old_spte) in write_protect_gfn()
1142 tdp_mmu_set_spte(kvm, &iter, new_spte); in write_protect_gfn()
1179 struct tdp_iter iter; in kvm_tdp_mmu_get_walk() local
1186 tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) { in kvm_tdp_mmu_get_walk()
1187 leaf = iter.level; in kvm_tdp_mmu_get_walk()
1188 sptes[leaf - 1] = iter.old_spte; in kvm_tdp_mmu_get_walk()