Lines Matching refs:sptep

150 	u64 *sptep;  member
169 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
176 static void mmu_spte_set(u64 *sptep, u64 spte);
217 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, in mark_mmio_spte() argument
222 trace_mark_mmio_spte(sptep, gfn, mask); in mark_mmio_spte()
223 mmu_spte_set(sptep, mask); in mark_mmio_spte()
241 static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, in set_mmio_spte() argument
245 mark_mmio_spte(vcpu, sptep, gfn, access); in set_mmio_spte()
291 static void __set_spte(u64 *sptep, u64 spte) in __set_spte() argument
293 WRITE_ONCE(*sptep, spte); in __set_spte()
296 static void __update_clear_spte_fast(u64 *sptep, u64 spte) in __update_clear_spte_fast() argument
298 WRITE_ONCE(*sptep, spte); in __update_clear_spte_fast()
301 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) in __update_clear_spte_slow() argument
303 return xchg(sptep, spte); in __update_clear_spte_slow()
306 static u64 __get_spte_lockless(u64 *sptep) in __get_spte_lockless() argument
308 return READ_ONCE(*sptep); in __get_spte_lockless()
319 static void count_spte_clear(u64 *sptep, u64 spte) in count_spte_clear() argument
321 struct kvm_mmu_page *sp = sptep_to_sp(sptep); in count_spte_clear()
331 static void __set_spte(u64 *sptep, u64 spte) in __set_spte() argument
335 ssptep = (union split_spte *)sptep; in __set_spte()
350 static void __update_clear_spte_fast(u64 *sptep, u64 spte) in __update_clear_spte_fast() argument
354 ssptep = (union split_spte *)sptep; in __update_clear_spte_fast()
366 count_spte_clear(sptep, spte); in __update_clear_spte_fast()
369 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) in __update_clear_spte_slow() argument
373 ssptep = (union split_spte *)sptep; in __update_clear_spte_slow()
380 count_spte_clear(sptep, spte); in __update_clear_spte_slow()
403 static u64 __get_spte_lockless(u64 *sptep) in __get_spte_lockless() argument
405 struct kvm_mmu_page *sp = sptep_to_sp(sptep); in __get_spte_lockless()
406 union split_spte spte, *orig = (union split_spte *)sptep; in __get_spte_lockless()
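The two sets of writers indexed above are the 64-bit and the 32-bit PAE variants of the low-level SPTE accessors: on 64-bit hosts a single WRITE_ONCE()/xchg() is enough, while on PAE the 64-bit SPTE has to be stored as two 32-bit halves in an order that never lets a lockless walker observe a half-written, present-looking entry (count_spte_clear() feeds the counter that __get_spte_lockless() uses to detect a racing clear). A minimal self-contained model of the "make present" ordering, assuming a little-endian layout and using a C11 fence as a stand-in for the kernel's smp_wmb():

#include <stdatomic.h>
#include <stdint.h>

union split_spte {
	struct {
		uint32_t spte_low;	/* holds the present bit on x86 */
		uint32_t spte_high;
	};
	uint64_t spte;
};

/* Model of the PAE __set_spte(): take an entry from non-present to
 * present.  The high half is published first, so a concurrent lockless
 * reader sees either the old non-present value or the complete new one. */
static void set_spte_pae_model(union split_spte *ssptep, uint64_t spte)
{
	union split_spte sspte = { .spte = spte };

	ssptep->spte_high = sspte.spte_high;

	atomic_thread_fence(memory_order_release);	/* kernel: smp_wmb() */

	ssptep->spte_low = sspte.spte_low;
}

Clearing runs the other way round: the low (present) half is written first and the high half second, so the entry is never present with stale high bits.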
457 static void mmu_spte_set(u64 *sptep, u64 new_spte) in mmu_spte_set() argument
459 WARN_ON(is_shadow_present_pte(*sptep)); in mmu_spte_set()
460 __set_spte(sptep, new_spte); in mmu_spte_set()
467 static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte) in mmu_spte_update_no_track() argument
469 u64 old_spte = *sptep; in mmu_spte_update_no_track()
474 mmu_spte_set(sptep, new_spte); in mmu_spte_update_no_track()
479 __update_clear_spte_fast(sptep, new_spte); in mmu_spte_update_no_track()
481 old_spte = __update_clear_spte_slow(sptep, new_spte); in mmu_spte_update_no_track()
499 static bool mmu_spte_update(u64 *sptep, u64 new_spte) in mmu_spte_update() argument
502 u64 old_spte = mmu_spte_update_no_track(sptep, new_spte); in mmu_spte_update()
540 static int mmu_spte_clear_track_bits(u64 *sptep) in mmu_spte_clear_track_bits() argument
543 u64 old_spte = *sptep; in mmu_spte_clear_track_bits()
546 __update_clear_spte_fast(sptep, 0ull); in mmu_spte_clear_track_bits()
548 old_spte = __update_clear_spte_slow(sptep, 0ull); in mmu_spte_clear_track_bits()
576 static void mmu_spte_clear_no_track(u64 *sptep) in mmu_spte_clear_no_track() argument
578 __update_clear_spte_fast(sptep, 0ull); in mmu_spte_clear_no_track()
581 static u64 mmu_spte_get_lockless(u64 *sptep) in mmu_spte_get_lockless() argument
583 return __get_spte_lockless(sptep); in mmu_spte_get_lockless()
605 static bool mmu_spte_age(u64 *sptep) in mmu_spte_age() argument
607 u64 spte = mmu_spte_get_lockless(sptep); in mmu_spte_age()
614 (unsigned long *)sptep); in mmu_spte_age()
624 mmu_spte_update_no_track(sptep, spte); in mmu_spte_age()
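The mmu_spte_age() fragments indexed above fit together as follows: when the SPTE has hardware Accessed/Dirty bits, the Accessed bit is dropped with a single atomic bit op directly on the SPTE; otherwise the SPTE is rewritten into its access-tracked form, capturing the dirty state first so it is not lost. A condensed reconstruction consistent with those fragments (helper names are the kernel's; this is a sketch, not a verbatim copy):

static bool mmu_spte_age(u64 *sptep)
{
	u64 spte = mmu_spte_get_lockless(sptep);

	if (!is_accessed_spte(spte))
		return false;

	if (spte_ad_enabled(spte)) {
		/* Hardware A/D: clear just the Accessed bit, atomically. */
		clear_bit((ffs(shadow_accessed_mask) - 1),
			  (unsigned long *)sptep);
	} else {
		/* Access tracking: preserve the dirty state before the
		 * SPTE is rewritten into its tracked, non-present form. */
		if (is_writable_pte(spte))
			kvm_set_pfn_dirty(spte_to_pfn(spte));
		spte = mark_spte_for_access_track(spte);
		mmu_spte_update_no_track(sptep, spte);
	}

	return true;
}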
931 static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep) in pte_list_remove() argument
933 mmu_spte_clear_track_bits(sptep); in pte_list_remove()
934 __pte_list_remove(sptep, rmap_head); in pte_list_remove()
1008 u64 *sptep; in rmap_get_first() local
1015 sptep = (u64 *)rmap_head->val; in rmap_get_first()
1021 sptep = iter->desc->sptes[iter->pos]; in rmap_get_first()
1023 BUG_ON(!is_shadow_present_pte(*sptep)); in rmap_get_first()
1024 return sptep; in rmap_get_first()
1034 u64 *sptep; in rmap_get_next() local
1039 sptep = iter->desc->sptes[iter->pos]; in rmap_get_next()
1040 if (sptep) in rmap_get_next()
1049 sptep = iter->desc->sptes[iter->pos]; in rmap_get_next()
1056 BUG_ON(!is_shadow_present_pte(*sptep)); in rmap_get_next()
1057 return sptep; in rmap_get_next()
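rmap_get_first() and rmap_get_next() back the for_each_rmap_spte() iterator used by most of the callers below; rmap_head->val either points at a single SPTE directly or, with its low bit set, at a pte_list_desc chain holding several. A typical caller, mirroring the __rmap_write_protect() lines further down (kernel-internal types, shown only as a usage sketch):

static bool wrprot_all_sptes(struct kvm_rmap_head *rmap_head, bool pt_protect)
{
	struct rmap_iterator iter;
	u64 *sptep;
	bool flush = false;

	/* Visit every SPTE that currently maps the gfn behind this rmap head. */
	for_each_rmap_spte(rmap_head, &iter, sptep)
		flush |= spte_write_protect(sptep, pt_protect);

	return flush;
}

Callers that remove entries while walking, such as kvm_zap_rmapp() below, instead loop on rmap_get_first(), because removing an entry invalidates the iterator.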
1064 static void drop_spte(struct kvm *kvm, u64 *sptep) in drop_spte() argument
1066 if (mmu_spte_clear_track_bits(sptep)) in drop_spte()
1067 rmap_remove(kvm, sptep); in drop_spte()
1071 static bool __drop_large_spte(struct kvm *kvm, u64 *sptep) in __drop_large_spte() argument
1073 if (is_large_pte(*sptep)) { in __drop_large_spte()
1074 WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K); in __drop_large_spte()
1075 drop_spte(kvm, sptep); in __drop_large_spte()
1083 static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) in drop_large_spte() argument
1085 if (__drop_large_spte(vcpu->kvm, sptep)) { in drop_large_spte()
1086 struct kvm_mmu_page *sp = sptep_to_sp(sptep); in drop_large_spte()
1106 static bool spte_write_protect(u64 *sptep, bool pt_protect) in spte_write_protect() argument
1108 u64 spte = *sptep; in spte_write_protect()
1114 rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep); in spte_write_protect()
1120 return mmu_spte_update(sptep, spte); in spte_write_protect()
1127 u64 *sptep; in __rmap_write_protect() local
1131 for_each_rmap_spte(rmap_head, &iter, sptep) in __rmap_write_protect()
1132 flush |= spte_write_protect(sptep, pt_protect); in __rmap_write_protect()
1137 static bool spte_clear_dirty(u64 *sptep) in spte_clear_dirty() argument
1139 u64 spte = *sptep; in spte_clear_dirty()
1141 rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep); in spte_clear_dirty()
1145 return mmu_spte_update(sptep, spte); in spte_clear_dirty()
1148 static bool spte_wrprot_for_clear_dirty(u64 *sptep) in spte_wrprot_for_clear_dirty() argument
1151 (unsigned long *)sptep); in spte_wrprot_for_clear_dirty()
1152 if (was_writable && !spte_ad_enabled(*sptep)) in spte_wrprot_for_clear_dirty()
1153 kvm_set_pfn_dirty(spte_to_pfn(*sptep)); in spte_wrprot_for_clear_dirty()
1166 u64 *sptep; in __rmap_clear_dirty() local
1170 for_each_rmap_spte(rmap_head, &iter, sptep) in __rmap_clear_dirty()
1171 if (spte_ad_need_write_protect(*sptep)) in __rmap_clear_dirty()
1172 flush |= spte_wrprot_for_clear_dirty(sptep); in __rmap_clear_dirty()
1174 flush |= spte_clear_dirty(sptep); in __rmap_clear_dirty()
1179 static bool spte_set_dirty(u64 *sptep) in spte_set_dirty() argument
1181 u64 spte = *sptep; in spte_set_dirty()
1183 rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep); in spte_set_dirty()
1192 return mmu_spte_update(sptep, spte); in spte_set_dirty()
1197 u64 *sptep; in __rmap_set_dirty() local
1201 for_each_rmap_spte(rmap_head, &iter, sptep) in __rmap_set_dirty()
1202 if (spte_ad_enabled(*sptep)) in __rmap_set_dirty()
1203 flush |= spte_set_dirty(sptep); in __rmap_set_dirty()
1317 u64 *sptep; in kvm_zap_rmapp() local
1321 while ((sptep = rmap_get_first(rmap_head, &iter))) { in kvm_zap_rmapp()
1322 rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep); in kvm_zap_rmapp()
1324 pte_list_remove(rmap_head, sptep); in kvm_zap_rmapp()
1342 u64 *sptep; in kvm_set_pte_rmapp() local
1353 for_each_rmap_spte(rmap_head, &iter, sptep) { in kvm_set_pte_rmapp()
1355 sptep, *sptep, gfn, level); in kvm_set_pte_rmapp()
1360 pte_list_remove(rmap_head, sptep); in kvm_set_pte_rmapp()
1364 *sptep, new_pfn); in kvm_set_pte_rmapp()
1366 mmu_spte_clear_track_bits(sptep); in kvm_set_pte_rmapp()
1367 mmu_spte_set(sptep, new_spte); in kvm_set_pte_rmapp()
1534 u64 *sptep; in kvm_age_rmapp() local
1538 for_each_rmap_spte(rmap_head, &iter, sptep) in kvm_age_rmapp()
1539 young |= mmu_spte_age(sptep); in kvm_age_rmapp()
1549 u64 *sptep; in kvm_test_age_rmapp() local
1552 for_each_rmap_spte(rmap_head, &iter, sptep) in kvm_test_age_rmapp()
1553 if (is_accessed_spte(*sptep)) in kvm_test_age_rmapp()
1686 u64 *sptep; in kvm_mmu_mark_parents_unsync() local
1689 for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) { in kvm_mmu_mark_parents_unsync()
1690 mark_unsync(sptep); in kvm_mmu_mark_parents_unsync()
2157 iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index; in shadow_walk_okay()
2175 __shadow_walk_next(iterator, *iterator->sptep); in shadow_walk_next()
2178 static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep, in link_shadow_page() argument
2187 mmu_spte_set(sptep, spte); in link_shadow_page()
2189 mmu_page_add_parent_pte(vcpu, sp, sptep); in link_shadow_page()
2192 mark_unsync(sptep); in link_shadow_page()
2195 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, in validate_direct_spte() argument
2198 if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) { in validate_direct_spte()
2208 child = to_shadow_page(*sptep & PT64_BASE_ADDR_MASK); in validate_direct_spte()
2212 drop_parent_pte(child, sptep); in validate_direct_spte()
2265 u64 *sptep; in kvm_mmu_unlink_parents() local
2268 while ((sptep = rmap_get_first(&sp->parent_ptes, &iter))) in kvm_mmu_unlink_parents()
2269 drop_parent_pte(sp, sptep); in kvm_mmu_unlink_parents()
2560 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, in set_spte() argument
2569 if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access)) in set_spte()
2572 sp = sptep_to_sp(sptep); in set_spte()
2574 ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative, in set_spte()
2580 if (*sptep == spte) in set_spte()
2582 else if (mmu_spte_update(sptep, spte)) in set_spte()
2587 static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, in mmu_set_spte() argument
2599 *sptep, write_fault, gfn); in mmu_set_spte()
2601 if (is_shadow_present_pte(*sptep)) { in mmu_set_spte()
2606 if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) { in mmu_set_spte()
2608 u64 pte = *sptep; in mmu_set_spte()
2611 drop_parent_pte(child, sptep); in mmu_set_spte()
2613 } else if (pfn != spte_to_pfn(*sptep)) { in mmu_set_spte()
2615 spte_to_pfn(*sptep), pfn); in mmu_set_spte()
2616 drop_spte(vcpu->kvm, sptep); in mmu_set_spte()
2622 set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn, in mmu_set_spte()
2634 if (unlikely(is_mmio_spte(*sptep))) in mmu_set_spte()
2646 pgprintk("%s: setting spte %llx\n", __func__, *sptep); in mmu_set_spte()
2647 trace_kvm_mmu_set_spte(level, gfn, sptep); in mmu_set_spte()
2648 if (!was_rmapped && is_large_pte(*sptep)) in mmu_set_spte()
2651 if (is_shadow_present_pte(*sptep)) { in mmu_set_spte()
2653 rmap_count = rmap_add(vcpu, sptep, gfn); in mmu_set_spte()
2655 rmap_recycle(vcpu, sptep, gfn); in mmu_set_spte()
2703 struct kvm_mmu_page *sp, u64 *sptep) in __direct_pte_prefetch() argument
2710 i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1); in __direct_pte_prefetch()
2714 if (is_shadow_present_pte(*spte) || spte == sptep) { in __direct_pte_prefetch()
2725 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) in direct_pte_prefetch() argument
2729 sp = sptep_to_sp(sptep); in direct_pte_prefetch()
2742 __direct_pte_prefetch(vcpu, sp, sptep); in direct_pte_prefetch()
2877 disallowed_hugepage_adjust(*it.sptep, gfn, it.level, in __direct_map()
2884 drop_large_spte(vcpu, it.sptep); in __direct_map()
2885 if (!is_shadow_present_pte(*it.sptep)) { in __direct_map()
2889 link_shadow_page(vcpu, it.sptep, sp); in __direct_map()
2896 ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL, in __direct_map()
2902 direct_pte_prefetch(vcpu, it.sptep); in __direct_map()
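The __direct_map() lines above are a shadow walk that fills in any missing intermediate page tables and then installs the leaf SPTE. Condensed to its core (NX-hugepage adjustment, accounting and error handling omitted; gpa, gfn, level, pfn, write, prefault and map_writable come from the fault handler), the walk looks roughly like this:

	struct kvm_shadow_walk_iterator it;
	struct kvm_mmu_page *sp;
	gfn_t base_gfn = gfn;
	int ret;

	for_each_shadow_entry(vcpu, gpa, it) {
		base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
		if (it.level == level)
			break;

		/* A stale huge mapping at this level has to go first. */
		drop_large_spte(vcpu, it.sptep);

		/* Allocate (or find) the next-level table and hook it in. */
		if (!is_shadow_present_pte(*it.sptep)) {
			sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
					      it.level - 1, true, ACC_ALL);
			link_shadow_page(vcpu, it.sptep, sp);
		}
	}

	/* Install the leaf and opportunistically prefetch its neighbours. */
	ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL, write, level, base_gfn,
			   pfn, prefault, map_writable);
	direct_pte_prefetch(vcpu, it.sptep);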
2986 u64 *sptep, u64 old_spte, u64 new_spte) in fast_pf_fix_direct_spte() argument
3004 if (cmpxchg64(sptep, old_spte, new_spte) != old_spte) in fast_pf_fix_direct_spte()
3012 gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt); in fast_pf_fix_direct_spte()
3055 sp = sptep_to_sp(iterator.sptep); in fast_page_fault()
3113 if (fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte, in fast_page_fault()
3127 trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep, in fast_page_fault()
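fast_pf_fix_direct_spte() runs without mmu_lock, so the repaired SPTE is installed with cmpxchg64(): if another vCPU zapped or changed the entry in the meantime, the exchange fails and the fault is simply retried. A self-contained model of that pattern (names here are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Install new_spte only if the entry still holds the value the fix was
 * computed from; a concurrent update makes the exchange fail and the
 * caller lets the page fault be replayed instead. */
static bool fix_spte_lockless(_Atomic uint64_t *sptep,
			      uint64_t old_spte, uint64_t new_spte)
{
	return atomic_compare_exchange_strong(sptep, &old_spte, new_spte);
}

When the fix restores write access, the real function additionally looks up the gfn from the shadow page (the kvm_mmu_page_get_gfn() line above) so the page can be marked dirty for dirty logging.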
3504 spte = mmu_spte_get_lockless(iterator.sptep); in get_walk()
3518 static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) in get_mmio_spte() argument
3526 *sptep = 0ull; in get_mmio_spte()
3536 *sptep = 0ull; in get_mmio_spte()
3563 *sptep = sptes[leaf - 1]; in get_mmio_spte()
3629 clear_sp_write_flooding_count(iterator.sptep); in shadow_page_table_clear_flood()
3942 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, in sync_mmio_spte() argument
3945 if (unlikely(is_mmio_spte(*sptep))) { in sync_mmio_spte()
3946 if (gfn != get_mmio_spte_gfn(*sptep)) { in sync_mmio_spte()
3947 mmu_spte_clear_no_track(sptep); in sync_mmio_spte()
3952 mark_mmio_spte(vcpu, sptep, gfn, access); in sync_mmio_spte()
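sync_mmio_spte() keeps cached MMIO SPTEs coherent when a shadow page is synced: if the cached gfn is stale the entry is thrown away with mmu_spte_clear_no_track(), otherwise the cached gfn/access (and the MMIO generation) are refreshed via mark_mmio_spte(). A reconstruction consistent with the lines indexed above (argument list abbreviated relative to the source):

static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
			   unsigned int access)
{
	if (unlikely(is_mmio_spte(*sptep))) {
		if (gfn != get_mmio_spte_gfn(*sptep)) {
			/* The gfn behind this entry changed: drop it. */
			mmu_spte_clear_no_track(sptep);
			return true;
		}

		/* Same gfn: refresh the cached access bits and generation. */
		mark_mmio_spte(vcpu, sptep, gfn, access);
		return true;
	}

	return false;
}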
5574 u64 *sptep; in kvm_mmu_zap_collapsible_spte() local
5581 for_each_rmap_spte(rmap_head, &iter, sptep) { in kvm_mmu_zap_collapsible_spte()
5582 sp = sptep_to_sp(sptep); in kvm_mmu_zap_collapsible_spte()
5583 pfn = spte_to_pfn(*sptep); in kvm_mmu_zap_collapsible_spte()
5595 pte_list_remove(rmap_head, sptep); in kvm_mmu_zap_collapsible_spte()