Lines Matching refs:FNAME

26 #define FNAME(name) paging##64_##name macro
44 #define FNAME(name) paging##32_##name macro
58 #define FNAME(name) ept_##name macro
78 #define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
105 static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access, in FNAME() function
123 static inline int FNAME(is_present_gpte)(unsigned long pte) in FNAME() function
132 static bool FNAME(is_bad_mt_xwr)(struct rsvd_bits_validate *rsvd_check, u64 gpte) in FNAME() function
141 static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level) in FNAME() function
144 FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte); in FNAME()
147 static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in FNAME() function
186 static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu, in FNAME() function
190 if (!FNAME(is_present_gpte)(gpte)) in FNAME()
198 if (FNAME(is_rsvd_bits_set)(vcpu->arch.mmu, gpte, PG_LEVEL_4K)) in FNAME()
214 static inline unsigned FNAME(gpte_access)(u64 gpte) in FNAME() function
232 static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, in FNAME() function
284 ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte); in FNAME()
294 static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte) in FNAME() function
308 static int FNAME(walk_addr_generic)(struct guest_walker *walker, in FNAME() function
341 if (!FNAME(is_present_gpte)(pte)) in FNAME()
409 if (unlikely(!FNAME(is_present_gpte)(pte))) in FNAME()
412 if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte, walker->level))) { in FNAME()
420 walker->pt_access[walker->level - 1] = FNAME(gpte_access)(pt_access ^ walk_nx_mask); in FNAME()
423 pte_pkey = FNAME(gpte_pkeys)(vcpu, pte); in FNAME()
427 walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask); in FNAME()
445 FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte); in FNAME()
456 ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, in FNAME()
510 static int FNAME(walk_addr)(struct guest_walker *walker, in FNAME() function
513 return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr, in FNAME()
518 static int FNAME(walk_addr_nested)(struct guest_walker *walker, in FNAME() function
522 return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu, in FNAME()
528 FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, in FNAME() function
535 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte)) in FNAME()
541 pte_access = sp->role.access & FNAME(gpte_access)(gpte); in FNAME()
542 FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte); in FNAME()
559 static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, in FNAME() function
564 FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false); in FNAME()
567 static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu, in FNAME() function
590 static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw, in FNAME() function
616 if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true)) in FNAME()
626 static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr, in FNAME() function
652 if (FNAME(gpte_changed)(vcpu, gw, top_level)) in FNAME()
678 if (FNAME(gpte_changed)(vcpu, gw, it.level - 1)) in FNAME()
723 FNAME(pte_prefetch)(vcpu, gw, it.sptep); in FNAME()
749 FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu, in FNAME() function
785 static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code, in FNAME() function
808 r = FNAME(walk_addr)(&walker, vcpu, addr, error_code); in FNAME()
832 is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu, in FNAME()
879 r = FNAME(fetch)(vcpu, addr, &walker, error_code, max_level, pfn, in FNAME()
889 static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp) in FNAME() function
901 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa) in FNAME() function
936 pte_gpa = FNAME(get_level1_sp_gpa)(sp); in FNAME()
951 FNAME(update_pte)(vcpu, sp, sptep, &gpte); in FNAME()
961 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t addr, u32 access, in FNAME() function
968 r = FNAME(walk_addr)(&walker, vcpu, addr, access); in FNAME()
981 static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr, in FNAME() function
994 r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access); in FNAME()
1019 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) in FNAME() function
1029 first_pte_gpa = FNAME(get_level1_sp_gpa)(sp); in FNAME()
1046 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) { in FNAME()
1059 pte_access &= FNAME(gpte_access)(gpte); in FNAME()
1060 FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte); in FNAME()
1095 #undef FNAME
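
The three FNAME definitions at the top of the listing and the single #undef at line 1095 are the whole template mechanism: paging_tmpl.h is included once per paging flavour (PTTYPE 64, 32, and EPT), FNAME() pastes the flavour prefix onto every function it defines, and the trailing #undef clears the macro so the next inclusion can redefine it. Below is a minimal, self-contained sketch of that pattern, assuming a toy walk_addr body; the printf payloads and the single-file layout are illustrative stand-ins, not the kernel code, but the token-pasting and define/undef cycle match what the listing shows.

#include <stdio.h>

/* Flavour 1: 64-bit paging (as if the template were included with PTTYPE == 64). */
#define FNAME(name) paging64_##name
static int FNAME(walk_addr)(unsigned long addr)	/* expands to paging64_walk_addr */
{
	printf("paging64 walk of %#lx\n", addr);	/* illustrative body only */
	return 0;
}
#undef FNAME	/* mirrors the #undef FNAME at the end of paging_tmpl.h */

/* Flavour 2: EPT (as if the template were included with PTTYPE == PTTYPE_EPT). */
#define FNAME(name) ept_##name
static int FNAME(walk_addr)(unsigned long addr)	/* expands to ept_walk_addr */
{
	printf("ept walk of %#lx\n", addr);
	return 0;
}
#undef FNAME

int main(void)
{
	/* Both variants coexist after expansion, which is why every function in
	 * the real file is wrapped in FNAME() rather than given a fixed name. */
	paging64_walk_addr(0x1000);
	ept_walk_addr(0x1000);
	return 0;
}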