/OK3568_Linux_fs/kernel/arch/x86/kvm/mmu/
mmu_audit.c
     32  typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level);
     93  static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)   in audit_mappings() argument
    100  sp = sptep_to_sp(sptep);   in audit_mappings()
    110  if (!is_shadow_present_pte(*sptep) || !is_last_spte(*sptep, level))   in audit_mappings()
    113  gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);   in audit_mappings()
    120  if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)   in audit_mappings()
    123  hpa, *sptep);   in audit_mappings()
    126  static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)   in inspect_spte_has_rmap() argument
    135  rev_sp = sptep_to_sp(sptep);   in inspect_spte_has_rmap()
    136  gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);   in inspect_spte_has_rmap()
    [all …]
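audit_mappings() verifies that each present leaf SPTE actually points at the host physical address the gfn translation yields for its slot. Below is a minimal user-space model of that check, assuming a toy 512-entry table and an illustrative gfn-to-hpa callback; none of these names or bit layouts are the kernel's, only the sptep - sp->spt index recovery and the masked-address comparison are taken from the matches above.

#include <stdint.h>
#include <stdio.h>

#define SPTE_PRESENT   (1ull << 0)             /* toy "present" bit */
#define BASE_ADDR_MASK 0x000ffffffffff000ull   /* stand-in for PT64_BASE_ADDR_MASK */
#define PT_ENTRIES     512

/* Toy shadow page: the SPTE table plus the gfn each slot should map. */
struct toy_shadow_page {
	uint64_t spt[PT_ENTRIES];
	uint64_t gfns[PT_ENTRIES];
};

/* Model of the audit_mappings() check: recover the slot index from the
 * entry pointer (sptep - sp->spt, as in the kernel), look up the gfn the
 * slot is supposed to map, and compare the address bits. */
static void audit_spte(struct toy_shadow_page *sp, uint64_t *sptep,
		       uint64_t (*gfn_to_hpa)(uint64_t))
{
	size_t idx = (size_t)(sptep - sp->spt);
	uint64_t hpa;

	if (!(*sptep & SPTE_PRESENT))
		return;                 /* only present leaf entries are audited */

	hpa = gfn_to_hpa(sp->gfns[idx]);
	if ((*sptep & BASE_ADDR_MASK) != hpa)
		printf("audit error: slot %zu maps %#llx, expected %#llx\n",
		       idx, (unsigned long long)(*sptep & BASE_ADDR_MASK),
		       (unsigned long long)hpa);
}

static uint64_t identity_hpa(uint64_t gfn) { return gfn << 12; }

int main(void)
{
	struct toy_shadow_page sp = { 0 };

	sp.gfns[3] = 42;
	sp.spt[3] = (41ull << 12) | SPTE_PRESENT;   /* deliberately stale mapping */
	audit_spte(&sp, &sp.spt[3], identity_hpa);
	return 0;
}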
|
mmu.c
    150  u64 *sptep;   member
    169  ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
    176  static void mmu_spte_set(u64 *sptep, u64 spte);
    217  static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,   in mark_mmio_spte() argument
    222  trace_mark_mmio_spte(sptep, gfn, mask);   in mark_mmio_spte()
    223  mmu_spte_set(sptep, mask);   in mark_mmio_spte()
    241  static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,   in set_mmio_spte() argument
    245  mark_mmio_spte(vcpu, sptep, gfn, access);   in set_mmio_spte()
    291  static void __set_spte(u64 *sptep, u64 spte)   in __set_spte() argument
    293  WRITE_ONCE(*sptep, spte);   in __set_spte()
    [all …]
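mark_mmio_spte() caches an MMIO access by encoding the gfn and access bits into a special not-present SPTE, so a later fault on the same address can be recognized as MMIO without walking the memslots; the value is then installed via mmu_spte_set(), which bottoms out in the WRITE_ONCE() seen in __set_spte(). A toy encoding sketch follows; the bit positions here are invented for illustration, the real masks live in KVM's spte code and vary by CPU.

#include <stdint.h>
#include <assert.h>

/* Toy MMIO-SPTE layout (illustrative only): a marker flag, the gfn in the
 * address bits, and the access permissions in the low bits. */
#define MMIO_FLAG        (1ull << 62)
#define MMIO_GFN_SHIFT   12
#define MMIO_ACCESS_MASK 0x7ull

static uint64_t make_mmio_spte(uint64_t gfn, unsigned access)
{
	return MMIO_FLAG | (gfn << MMIO_GFN_SHIFT) | (access & MMIO_ACCESS_MASK);
}

static int is_mmio_spte(uint64_t spte) { return !!(spte & MMIO_FLAG); }

static uint64_t mmio_spte_gfn(uint64_t spte)
{
	return (spte & ~(MMIO_FLAG | MMIO_ACCESS_MASK)) >> MMIO_GFN_SHIFT;
}

int main(void)
{
	/* Encode, then prove a later fault can recover gfn and access. */
	uint64_t spte = make_mmio_spte(0xbeef, 0x3);

	assert(is_mmio_spte(spte));
	assert(mmio_spte_gfn(spte) == 0xbeef);
	assert((spte & MMIO_ACCESS_MASK) == 0x3);
	return 0;
}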
|
mmutrace.h
    205  TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
    206  TP_ARGS(sptep, gfn, spte),
    209  __field(void *, sptep)
    216  __entry->sptep = sptep;
    222  TP_printk("sptep:%p gfn %llx access %x gen %x", __entry->sptep,
    250  u64 *sptep, u64 old_spte, int ret),
    251  TP_ARGS(vcpu, cr2_or_gpa, error_code, sptep, old_spte, ret),
    257  __field(u64 *, sptep)
    267  __entry->sptep = sptep;
    269  __entry->new_spte = *sptep;
    [all …]
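The fragments above come from TRACE_EVENT definitions. As a rough reconstruction of the first one (the mark_mmio_spte tracepoint), assuming an ACC_ALL mask and a get_mmio_spte_generation() helper that the printed "access"/"gen" fields imply, it plausibly reads as below; this is pieced together from the match lines, not verified against this tree.

TRACE_EVENT(mark_mmio_spte,
	TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
	TP_ARGS(sptep, gfn, spte),

	TP_STRUCT__entry(
		__field(void *, sptep)
		__field(gfn_t, gfn)
		__field(unsigned, access)
		__field(unsigned int, gen)
	),

	TP_fast_assign(
		__entry->sptep = sptep;
		__entry->gfn = gfn;
		__entry->access = spte & ACC_ALL;		/* assumed mask */
		__entry->gen = get_mmio_spte_generation(spte);	/* assumed helper */
	),

	TP_printk("sptep:%p gfn %llx access %x gen %x", __entry->sptep,
		  __entry->gfn, __entry->access, __entry->gen)
);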
|
paging_tmpl.h
    591  u64 *sptep)   in FNAME()
    598  sp = sptep_to_sp(sptep);   in FNAME()
    604  return __direct_pte_prefetch(vcpu, sp, sptep);   in FNAME()
    606  i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);   in FNAME()
    610  if (spte == sptep)   in FNAME()
    663  clear_sp_write_flooding_count(it.sptep);   in FNAME()
    664  drop_large_spte(vcpu, it.sptep);   in FNAME()
    667  if (!is_shadow_present_pte(*it.sptep)) {   in FNAME()
    682  link_shadow_page(vcpu, it.sptep, sp);   in FNAME()
    691  clear_sp_write_flooding_count(it.sptep);   in FNAME()
    [all …]
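Line 606 is the prefetch idiom: the index of the faulting entry is rounded down to a PTE_PREFETCH_NUM-aligned window so the neighbouring entries can be populated in the same pass, skipping the entry that faulted. A small runnable model of just that arithmetic (the table size and window size mirror the kernel's defaults, everything else is a stand-in):

#include <stdio.h>
#include <stdint.h>

#define PTE_PREFETCH_NUM 8
#define PT_ENTRIES       512

int main(void)
{
	uint64_t spt[PT_ENTRIES] = { 0 };
	uint64_t *sptep = &spt[27];		/* entry that just faulted */

	/* Round the index down to the start of its 8-entry window:
	 * the (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1) idiom. */
	size_t i = (sptep - spt) & ~(size_t)(PTE_PREFETCH_NUM - 1);

	for (uint64_t *spte = &spt[i]; spte < &spt[i] + PTE_PREFETCH_NUM; spte++) {
		if (spte == sptep)
			continue;	/* faulting entry is handled separately */
		printf("would prefetch index %zu\n", (size_t)(spte - spt));
	}
	return 0;	/* prints indexes 24..31 except 27 */
}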
|
tdp_iter.c
     13  iter->sptep = iter->pt_path[iter->level - 1] +   in tdp_iter_refresh_sptep()
     15  iter->old_spte = READ_ONCE(*iter->sptep);   in tdp_iter_refresh_sptep()
     78  iter->old_spte = READ_ONCE(*iter->sptep);   in try_step_down()
    111  iter->sptep++;   in try_step_side()
    112  iter->old_spte = READ_ONCE(*iter->sptep);   in try_step_side()
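try_step_side() advances within the current page table simply by bumping sptep and re-snapshotting the entry, which is why every step is paired with a READ_ONCE(). A toy single-level version of that sideways step; locking, RCU, and the multi-level pt_path[] bookkeeping that try_step_down() maintains are deliberately left out.

#include <stdint.h>
#include <stdio.h>

#define PT_ENTRIES 512

/* Minimal iterator state: current table, current entry, and a snapshot
 * of that entry (iter->old_spte in the kernel). */
struct toy_tdp_iter {
	uint64_t *pt;
	uint64_t *sptep;
	uint64_t old_spte;
};

static int try_step_side(struct toy_tdp_iter *iter)
{
	if (iter->sptep - iter->pt == PT_ENTRIES - 1)
		return 0;			/* end of this table */
	iter->sptep++;
	iter->old_spte = *iter->sptep;		/* kernel uses READ_ONCE() here */
	return 1;
}

int main(void)
{
	uint64_t pt[PT_ENTRIES] = { [1] = 0xabc };
	struct toy_tdp_iter iter = { .pt = pt, .sptep = &pt[0], .old_spte = pt[0] };

	if (try_step_side(&iter))
		printf("stepped to index %zu, spte %#llx\n",
		       (size_t)(iter.sptep - iter.pt),
		       (unsigned long long)iter.old_spte);
	return 0;
}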
|
mmu_internal.h
     71  static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)   in sptep_to_sp() argument
     73  return to_shadow_page(__pa(sptep));   in sptep_to_sp()
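sptep_to_sp() works because every shadow page table is exactly one page and page-aligned, so masking any entry pointer down to its page base identifies the containing table; to_shadow_page() then maps that page back to its owning struct kvm_mmu_page (in the kernel, via the struct page). A user-space model of the same trick, with a small registry standing in for the struct-page back pointer:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#define PAGE_SIZE 4096

/* Toy stand-in for struct kvm_mmu_page: owns one page-aligned SPTE table. */
struct toy_mmu_page {
	uint64_t *spt;
};

/* Registry mapping table page base -> owner, replacing the kernel's
 * page->private lookup inside to_shadow_page(). */
static struct { uintptr_t base; struct toy_mmu_page *sp; } registry[16];
static int nr_pages;

static struct toy_mmu_page *sptep_to_sp(uint64_t *sptep)
{
	/* Any pointer into a page-aligned table shares the table's page base. */
	uintptr_t base = (uintptr_t)sptep & ~(uintptr_t)(PAGE_SIZE - 1);

	for (int i = 0; i < nr_pages; i++)
		if (registry[i].base == base)
			return registry[i].sp;
	return NULL;
}

int main(void)
{
	struct toy_mmu_page sp;

	sp.spt = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
	registry[nr_pages].base = (uintptr_t)sp.spt;
	registry[nr_pages++].sp = &sp;

	/* A pointer to any of the 512 entries resolves back to its owner. */
	printf("resolved: %s\n", sptep_to_sp(&sp.spt[200]) == &sp ? "yes" : "no");
	free(sp.spt);
	return 0;
}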
|
tdp_iter.h
     28  u64 *sptep;   member
|
tdp_mmu.c
    360  WRITE_ONCE(*iter->sptep, new_spte);   in __tdp_mmu_set_spte()
    535  trace_mark_mmio_spte(iter->sptep, iter->gfn, new_spte);   in tdp_mmu_map_handle_target_level()
    562  trace_kvm_mmu_set_spte(iter->level, iter->gfn, iter->sptep);   in tdp_mmu_map_handle_target_level()
    625  iter.old_spte = READ_ONCE(*iter.sptep);   in kvm_tdp_mmu_map()
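The TDP MMU publishes SPTE updates with WRITE_ONCE() and snapshots them with READ_ONCE(), guaranteeing that concurrent lockless walkers only ever see whole, untorn 64-bit values. The same pairing modeled in portable C11 with relaxed atomics; the RCU protection and fault-handling logic around the real __tdp_mmu_set_spte() are omitted.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* One SPTE; relaxed atomic access makes every load/store single-copy
 * atomic, which is the property WRITE_ONCE()/READ_ONCE() provide. */
static _Atomic uint64_t spte;

static void toy_tdp_mmu_set_spte(uint64_t new_spte)
{
	atomic_store_explicit(&spte, new_spte, memory_order_relaxed);
}

static uint64_t toy_tdp_mmu_read_spte(void)
{
	return atomic_load_explicit(&spte, memory_order_relaxed);
}

int main(void)
{
	toy_tdp_mmu_set_spte(0x1234000 | 1);	/* publish a present mapping */
	printf("old_spte snapshot: %#llx\n",
	       (unsigned long long)toy_tdp_mmu_read_spte());
	return 0;
}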
|
/OK3568_Linux_fs/kernel/arch/s390/mm/
pgtable.c
    649  pte_t *sptep, pte_t *tptep, pte_t pte)   in ptep_shadow_pte() argument
    657  spgste = pgste_get_lock(sptep);   in ptep_shadow_pte()
    658  spte = *sptep;   in ptep_shadow_pte()
    671  pgste_set_unlock(sptep, spgste);   in ptep_shadow_pte()
|
gmap.c
   2112  pte_t *sptep, *tptep;   in gmap_shadow_page() local
   2136  sptep = gmap_pte_op_walk(parent, paddr, &ptl);   in gmap_shadow_page()
   2137  if (sptep) {   in gmap_shadow_page()
   2147  rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);   in gmap_shadow_page()
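On s390, gmap_shadow_page() walks the parent gmap for the source PTE and then has ptep_shadow_pte() copy it into the shadow (nested-guest) table while the source's pgste is held locked, so the shadow entry can never be more permissive than the original. A loose user-space analogue of that copy-under-lock pattern, with a mutex standing in for pgste_get_lock()/pgste_set_unlock() and an invented protection bit; the real routine also validates the PTE and notifies the gmap layer.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_PROTECT (1u << 1)	/* toy write-protect bit */

/* Toy source PTE with its own lock, modeling the per-page pgste lock. */
struct toy_pte {
	uint32_t val;
	pthread_mutex_t lock;
};

static void toy_ptep_shadow_pte(struct toy_pte *sptep, uint32_t *tptep,
				uint32_t extra_prot)
{
	pthread_mutex_lock(&sptep->lock);	/* pgste_get_lock() analogue */
	*tptep = sptep->val | extra_prot;	/* shadow inherits, then tightens */
	pthread_mutex_unlock(&sptep->lock);	/* pgste_set_unlock() analogue */
}

int main(void)
{
	struct toy_pte src = { .val = 0x1000, .lock = PTHREAD_MUTEX_INITIALIZER };
	uint32_t shadow = 0;

	toy_ptep_shadow_pte(&src, &shadow, PTE_PROTECT);
	printf("shadow pte: %#x\n", shadow);
	return 0;
}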
|
/OK3568_Linux_fs/kernel/arch/s390/include/asm/
pgtable.h
   1165  pte_t *sptep, pte_t *tptep, pte_t pte);
|