Lines Matching refs:spte

166 #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)	\  argument
169 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
170 __shadow_walk_next(&(_walker), spte))
176 static void mmu_spte_set(u64 *sptep, u64 spte);
226 static gfn_t get_mmio_spte_gfn(u64 spte) in get_mmio_spte_gfn() argument
228 u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask; in get_mmio_spte_gfn()
230 gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN) in get_mmio_spte_gfn()
236 static unsigned get_mmio_spte_access(u64 spte) in get_mmio_spte_access() argument
238 return spte & shadow_mmio_access_mask; in get_mmio_spte_access()
252 static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte) in check_mmio_spte() argument
261 spte_gen = get_mmio_spte_generation(spte); in check_mmio_spte()
263 trace_check_mmio_spte(spte, kvm_gen, spte_gen); in check_mmio_spte()
291 static void __set_spte(u64 *sptep, u64 spte) in __set_spte() argument
293 WRITE_ONCE(*sptep, spte); in __set_spte()
296 static void __update_clear_spte_fast(u64 *sptep, u64 spte) in __update_clear_spte_fast() argument
298 WRITE_ONCE(*sptep, spte); in __update_clear_spte_fast()
301 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) in __update_clear_spte_slow() argument
303 return xchg(sptep, spte); in __update_clear_spte_slow()
316 u64 spte; member
319 static void count_spte_clear(u64 *sptep, u64 spte) in count_spte_clear() argument
323 if (is_shadow_present_pte(spte)) in count_spte_clear()
331 static void __set_spte(u64 *sptep, u64 spte) in __set_spte() argument
336 sspte = (union split_spte)spte; in __set_spte()
350 static void __update_clear_spte_fast(u64 *sptep, u64 spte) in __update_clear_spte_fast() argument
355 sspte = (union split_spte)spte; in __update_clear_spte_fast()
366 count_spte_clear(sptep, spte); in __update_clear_spte_fast()
369 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) in __update_clear_spte_slow() argument
374 sspte = (union split_spte)spte; in __update_clear_spte_slow()
380 count_spte_clear(sptep, spte); in __update_clear_spte_slow()
382 return orig.spte; in __update_clear_spte_slow()
406 union split_spte spte, *orig = (union split_spte *)sptep; in __get_spte_lockless() local
413 spte.spte_low = orig->spte_low; in __get_spte_lockless()
416 spte.spte_high = orig->spte_high; in __get_spte_lockless()
419 if (unlikely(spte.spte_low != orig->spte_low || in __get_spte_lockless()
423 return spte.spte; in __get_spte_lockless()
427 static bool spte_has_volatile_bits(u64 spte) in spte_has_volatile_bits() argument
429 if (!is_shadow_present_pte(spte)) in spte_has_volatile_bits()
438 if (spte_can_locklessly_be_made_writable(spte) || in spte_has_volatile_bits()
439 is_access_track_spte(spte)) in spte_has_volatile_bits()
442 if (spte_ad_enabled(spte)) { in spte_has_volatile_bits()
443 if ((spte & shadow_accessed_mask) == 0 || in spte_has_volatile_bits()
444 (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0)) in spte_has_volatile_bits()
587 static u64 restore_acc_track_spte(u64 spte) in restore_acc_track_spte() argument
589 u64 new_spte = spte; in restore_acc_track_spte()
590 u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT) in restore_acc_track_spte()
593 WARN_ON_ONCE(spte_ad_enabled(spte)); in restore_acc_track_spte()
594 WARN_ON_ONCE(!is_access_track_spte(spte)); in restore_acc_track_spte()
607 u64 spte = mmu_spte_get_lockless(sptep); in mmu_spte_age() local
609 if (!is_accessed_spte(spte)) in mmu_spte_age()
612 if (spte_ad_enabled(spte)) { in mmu_spte_age()
620 if (is_writable_pte(spte)) in mmu_spte_age()
621 kvm_set_pfn_dirty(spte_to_pfn(spte)); in mmu_spte_age()
623 spte = mark_spte_for_access_track(spte); in mmu_spte_age()
624 mmu_spte_update_no_track(sptep, spte); in mmu_spte_age()
836 static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte, in pte_list_add() argument
843 rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte); in pte_list_add()
844 rmap_head->val = (unsigned long)spte; in pte_list_add()
846 rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte); in pte_list_add()
849 desc->sptes[1] = spte; in pte_list_add()
853 rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte); in pte_list_add()
867 desc->sptes[i] = spte; in pte_list_add()
895 static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head) in __pte_list_remove() argument
902 pr_err("%s: %p 0->BUG\n", __func__, spte); in __pte_list_remove()
905 rmap_printk("%s: %p 1->0\n", __func__, spte); in __pte_list_remove()
906 if ((u64 *)rmap_head->val != spte) { in __pte_list_remove()
907 pr_err("%s: %p 1->BUG\n", __func__, spte); in __pte_list_remove()
912 rmap_printk("%s: %p many->many\n", __func__, spte); in __pte_list_remove()
917 if (desc->sptes[i] == spte) { in __pte_list_remove()
926 pr_err("%s: %p many->many\n", __func__, spte); in __pte_list_remove()
965 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) in rmap_add() argument
970 sp = sptep_to_sp(spte); in rmap_add()
971 kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); in rmap_add()
973 return pte_list_add(vcpu, spte, rmap_head); in rmap_add()
976 static void rmap_remove(struct kvm *kvm, u64 *spte) in rmap_remove() argument
982 sp = sptep_to_sp(spte); in rmap_remove()
983 gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt); in rmap_remove()
985 __pte_list_remove(spte, rmap_head); in rmap_remove()
1108 u64 spte = *sptep; in spte_write_protect() local
1110 if (!is_writable_pte(spte) && in spte_write_protect()
1111 !(pt_protect && spte_can_locklessly_be_made_writable(spte))) in spte_write_protect()
1117 spte &= ~SPTE_MMU_WRITEABLE; in spte_write_protect()
1118 spte = spte & ~PT_WRITABLE_MASK; in spte_write_protect()
1120 return mmu_spte_update(sptep, spte); in spte_write_protect()
1139 u64 spte = *sptep; in spte_clear_dirty() local
1143 MMU_WARN_ON(!spte_ad_enabled(spte)); in spte_clear_dirty()
1144 spte &= ~shadow_dirty_mask; in spte_clear_dirty()
1145 return mmu_spte_update(sptep, spte); in spte_clear_dirty()
1181 u64 spte = *sptep; in spte_set_dirty() local
1190 spte |= shadow_dirty_mask; in spte_set_dirty()
1192 return mmu_spte_update(sptep, spte); in spte_set_dirty()
1560 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) in rmap_recycle() argument
1565 sp = sptep_to_sp(spte); in rmap_recycle()
1683 static void mark_unsync(u64 *spte);
1694 static void mark_unsync(u64 *spte) in mark_unsync() argument
1699 sp = sptep_to_sp(spte); in mark_unsync()
1700 index = spte - sp->spt; in mark_unsync()
2015 static void clear_sp_write_flooding_count(u64 *spte) in clear_sp_write_flooding_count() argument
2017 __clear_sp_write_flooding_count(sptep_to_sp(spte)); in clear_sp_write_flooding_count()
2162 u64 spte) in __shadow_walk_next() argument
2164 if (is_last_spte(spte, iterator->level)) { in __shadow_walk_next()
2169 iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK; in __shadow_walk_next()
2181 u64 spte; in link_shadow_page() local
2185 spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp)); in link_shadow_page()
2187 mmu_spte_set(sptep, spte); in link_shadow_page()
2219 u64 *spte, struct list_head *invalid_list) in mmu_page_zap_pte() argument
2224 pte = *spte; in mmu_page_zap_pte()
2227 drop_spte(kvm, spte); in mmu_page_zap_pte()
2232 drop_parent_pte(child, spte); in mmu_page_zap_pte()
2245 mmu_spte_clear_no_track(spte); in mmu_page_zap_pte()
2565 u64 spte; in set_spte() local
2575 can_unsync, host_writable, sp_ad_disabled(sp), &spte); in set_spte()
2577 if (spte & PT_WRITABLE_MASK) in set_spte()
2580 if (*sptep == spte) in set_spte()
2582 else if (mmu_spte_update(sptep, spte)) in set_spte()
2705 u64 *spte, *start = NULL; in __direct_pte_prefetch() local
2711 spte = sp->spt + i; in __direct_pte_prefetch()
2713 for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) { in __direct_pte_prefetch()
2714 if (is_shadow_present_pte(*spte) || spte == sptep) { in __direct_pte_prefetch()
2717 if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0) in __direct_pte_prefetch()
2721 start = spte; in __direct_pte_prefetch()
2828 void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level, in disallowed_hugepage_adjust() argument
2834 is_shadow_present_pte(spte) && in disallowed_hugepage_adjust()
2835 !is_large_pte(spte)) { in disallowed_hugepage_adjust()
3019 static bool is_access_allowed(u32 fault_err_code, u64 spte) in is_access_allowed() argument
3022 return is_executable_pte(spte); in is_access_allowed()
3025 return is_writable_pte(spte); in is_access_allowed()
3028 return spte & PT_PRESENT_MASK; in is_access_allowed()
3040 u64 spte = 0ull; in fast_page_fault() local
3051 for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte) in fast_page_fault()
3052 if (!is_shadow_present_pte(spte)) in fast_page_fault()
3056 if (!is_last_spte(spte, sp->role.level)) in fast_page_fault()
3069 if (is_access_allowed(error_code, spte)) { in fast_page_fault()
3074 new_spte = spte; in fast_page_fault()
3076 if (is_access_track_spte(spte)) in fast_page_fault()
3085 spte_can_locklessly_be_made_writable(spte)) { in fast_page_fault()
3104 if (new_spte == spte || in fast_page_fault()
3113 if (fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte, in fast_page_fault()
3128 spte, ret); in fast_page_fault()
3495 u64 spte; in get_walk() local
3502 __shadow_walk_next(&iterator, spte)) { in get_walk()
3504 spte = mmu_spte_get_lockless(iterator.sptep); in get_walk()
3506 sptes[leaf - 1] = spte; in get_walk()
3508 if (!is_shadow_present_pte(spte)) in get_walk()
3570 u64 spte; in handle_mmio_page_fault() local
3576 reserved = get_mmio_spte(vcpu, addr, &spte); in handle_mmio_page_fault()
3580 if (is_mmio_spte(spte)) { in handle_mmio_page_fault()
3581 gfn_t gfn = get_mmio_spte_gfn(spte); in handle_mmio_page_fault()
3582 unsigned int access = get_mmio_spte_access(spte); in handle_mmio_page_fault()
3584 if (!check_mmio_spte(vcpu, spte)) in handle_mmio_page_fault()
3625 u64 spte; in shadow_page_table_clear_flood() local
3628 for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) { in shadow_page_table_clear_flood()
3630 if (!is_shadow_present_pte(spte)) in shadow_page_table_clear_flood()
4958 u64 *spte; in get_written_sptes() local
4982 spte = &sp->spt[page_offset / sizeof(*spte)]; in get_written_sptes()
4983 return spte; in get_written_sptes()
4993 u64 entry, gentry, *spte; in kvm_mmu_pte_write() local
5030 spte = get_written_sptes(sp, gpa, &npte); in kvm_mmu_pte_write()
5031 if (!spte) in kvm_mmu_pte_write()
5036 entry = *spte; in kvm_mmu_pte_write()
5037 mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL); in kvm_mmu_pte_write()
5040 if (need_remote_flush(entry, *spte)) in kvm_mmu_pte_write()
5042 ++spte; in kvm_mmu_pte_write()
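
Note: several of the hits above (the split_spte entries around source lines 316-423, in __get_spte_lockless()) implement the lockless read of a 64-bit SPTE on 32-bit PAE hosts, where the two halves cannot be read atomically. The following is a minimal, simplified sketch of that retry pattern, not the kernel code: it omits the clear_spte_count recheck the real function also performs, and the names split_spte_demo and read_spte_demo are illustrative only.

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative stand-in for the kernel's union split_spte. */
	union split_spte_demo {
		struct {
			uint32_t spte_low;
			uint32_t spte_high;
		};
		uint64_t spte;
	};

	/*
	 * Simplified retry loop in the spirit of __get_spte_lockless():
	 * read the low half first (a writer clears it before touching the
	 * high half), then the high half, and retry if the low half changed
	 * in between.  The real kernel code also rechecks sp->clear_spte_count.
	 */
	static uint64_t read_spte_demo(volatile union split_spte_demo *orig)
	{
		union split_spte_demo spte;

	retry:
		spte.spte_low = orig->spte_low;
		__sync_synchronize();		/* stand-in for smp_rmb() */

		spte.spte_high = orig->spte_high;
		__sync_synchronize();

		/* Low half moved under us: the two halves may be torn, retry. */
		if (spte.spte_low != orig->spte_low)
			goto retry;

		return spte.spte;
	}

	int main(void)
	{
		union split_spte_demo e = { .spte = 0x8000000012345007ULL };

		printf("spte = 0x%llx\n", (unsigned long long)read_spte_demo(&e));
		return 0;
	}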