Lines matching references to mmu (arm64 KVM stage-2 MMU; the functions all live in what appears to be the v5.11-era arch/arm64/kvm/mmu.c):

51 struct kvm_pgtable *pgt = kvm->arch.mmu.pgt; in stage2_apply_range()
83 kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu); in kvm_flush_remote_tlbs()
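
The match at line 83 is, in the v5.11-era source, the entire body of the arm64 kvm_flush_remote_tlbs(): remote TLB invalidation is a single hyp call that invalidates by VMID. A minimal sketch:

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
    /* Invalidate all stage-2 TLB entries tagged with this VM's VMID. */
    kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
}
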
165 static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size, in __unmap_stage2_range() argument
168 struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); in __unmap_stage2_range()
177 static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size) in unmap_stage2_range() argument
179 __unmap_stage2_range(mmu, start, size, true); in unmap_stage2_range()
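
Lines 165-179 show that unmap_stage2_range() is a thin wrapper forwarding to __unmap_stage2_range() with blocking allowed. A sketch of the pair; the lock assertion and the stage2_apply_range() call do not appear in the matches above and are assumptions based on the v5.11-era code:

static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start,
                                 u64 size, bool may_block)
{
    struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
    phys_addr_t end = start + size;

    assert_spin_locked(&kvm->mmu_lock);
    WARN_ON(size & ~PAGE_MASK);
    /* Walk the range, unmapping entries; may reschedule if allowed. */
    WARN_ON(stage2_apply_range(kvm, start, end, kvm_pgtable_stage2_unmap,
                               may_block));
}

static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start,
                               u64 size)
{
    /* Callers with no atomicity constraints can always block. */
    __unmap_stage2_range(mmu, start, size, true);
}
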
446 int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu) in kvm_init_stage2_mmu() argument
451 if (mmu->pgt != NULL) { in kvm_init_stage2_mmu()
464 mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran)); in kvm_init_stage2_mmu()
465 if (!mmu->last_vcpu_ran) { in kvm_init_stage2_mmu()
471 *per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1; in kvm_init_stage2_mmu()
473 mmu->arch = &kvm->arch; in kvm_init_stage2_mmu()
474 mmu->pgt = pgt; in kvm_init_stage2_mmu()
475 mmu->pgd_phys = __pa(pgt->pgd); in kvm_init_stage2_mmu()
476 mmu->vmid.vmid_gen = 0; in kvm_init_stage2_mmu()
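
Lines 446-476 together outline kvm_init_stage2_mmu(): reject double initialization, allocate and initialize a kvm_pgtable, allocate the last_vcpu_ran per-CPU tracker (seeded with -1 so the first run on any physical CPU forces a local TLB flush), then publish the page-table pointers and reset the VMID generation. A sketch assembled from those matches; the allocation calls and error paths are assumptions based on the v5.11-era code:

int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
{
    int cpu, err;
    struct kvm_pgtable *pgt;

    if (mmu->pgt != NULL) {
        kvm_err("kvm_arch already initialized?\n");
        return -EINVAL;
    }

    pgt = kzalloc(sizeof(*pgt), GFP_KERNEL);
    if (!pgt)
        return -ENOMEM;

    err = kvm_pgtable_stage2_init(pgt, kvm);
    if (err)
        goto out_free_pgtable;

    mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));
    if (!mmu->last_vcpu_ran) {
        err = -ENOMEM;
        goto out_destroy_pgtable;
    }

    /* -1: no vCPU has run here yet, so force a TLB flush on first run. */
    for_each_possible_cpu(cpu)
        *per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;

    mmu->arch = &kvm->arch;
    mmu->pgt = pgt;
    mmu->pgd_phys = __pa(pgt->pgd);
    mmu->vmid.vmid_gen = 0;  /* Forces a fresh VMID allocation. */
    return 0;

out_destroy_pgtable:
    kvm_pgtable_stage2_destroy(pgt);
out_free_pgtable:
    kfree(pgt);
    return err;
}
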
521 unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start); in stage2_unmap_memslot()
553 void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu) in kvm_free_stage2_pgd() argument
555 struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); in kvm_free_stage2_pgd()
559 pgt = mmu->pgt; in kvm_free_stage2_pgd()
561 mmu->pgd_phys = 0; in kvm_free_stage2_pgd()
562 mmu->pgt = NULL; in kvm_free_stage2_pgd()
563 free_percpu(mmu->last_vcpu_ran); in kvm_free_stage2_pgd()
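
Lines 553-563 give the teardown counterpart: kvm_free_stage2_pgd() detaches the page table from the mmu under kvm->mmu_lock (so concurrent users see a NULL pgt) and frees the per-CPU tracker, then destroys the table outside the lock. Sketch; the locking and the destroy-outside-the-lock ordering are assumptions from the v5.11-era code:

void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
{
    struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
    struct kvm_pgtable *pgt = NULL;

    spin_lock(&kvm->mmu_lock);
    pgt = mmu->pgt;
    if (pgt) {
        mmu->pgd_phys = 0;
        mmu->pgt = NULL;
        free_percpu(mmu->last_vcpu_ran);
    }
    spin_unlock(&kvm->mmu_lock);

    /* Tearing down the table can be slow; do it without the lock held. */
    if (pgt) {
        kvm_pgtable_stage2_destroy(pgt);
        kfree(pgt);
    }
}
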
588 struct kvm_pgtable *pgt = kvm->arch.mmu.pgt; in kvm_phys_addr_ioremap()
622 static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end) in stage2_wp_range() argument
624 struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); in stage2_wp_range()
654 stage2_wp_range(&kvm->arch.mmu, start, end); in kvm_mmu_wp_memory_region()
678 stage2_wp_range(&kvm->arch.mmu, start, end); in kvm_mmu_write_protect_pt_masked()
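
Lines 622-678 cover the dirty-logging write-protect path: stage2_wp_range() resolves its mmu back to the owning kvm and write-protects the range, and both the memslot-wide caller and the dirty-bitmap-mask caller pass &kvm->arch.mmu. Sketch, assuming the v5.11-era stage2_apply_range_resched() helper:

static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr,
                            phys_addr_t end)
{
    struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);

    /* Apply wrprotect across the range, rescheduling between batches. */
    stage2_apply_range_resched(kvm, addr, end,
                               kvm_pgtable_stage2_wrprotect);
}
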
1012 struct kvm_s2_mmu *mmu; in handle_access_fault() local
1017 mmu = vcpu->arch.hw_mmu; in handle_access_fault()
1018 kpte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa); in handle_access_fault()
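
Lines 1012-1018 show handle_access_fault() using the vCPU's hw_mmu (the stage-2 MMU the vCPU is currently running on) rather than kvm->arch.mmu directly, and marking the faulting IPA young. Sketch with the locking and pfn bookkeeping filled in as assumptions from the v5.11-era code:

/* Resolve an access-flag fault by making the page young again. */
static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
{
    pte_t pte;
    kvm_pte_t kpte;
    struct kvm_s2_mmu *mmu;

    spin_lock(&vcpu->kvm->mmu_lock);
    mmu = vcpu->arch.hw_mmu;
    kpte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa);
    spin_unlock(&vcpu->kvm->mmu_lock);

    /* If the entry was valid, propagate the access to the host page. */
    pte = __pte(kpte);
    if (pte_valid(pte))
        kvm_set_pfn_accessed(pte_pfn(pte));
}
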
1187 __unmap_stage2_range(&kvm->arch.mmu, gpa, size, may_block); in kvm_unmap_hva_handler()
1194 if (!kvm->arch.mmu.pgt) in kvm_unmap_hva_range()
1214 kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, gpa, PAGE_SIZE, in kvm_set_spte_handler()
1224 if (!kvm->arch.mmu.pgt) in kvm_set_spte_hva()
1244 kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt, gpa); in kvm_age_hva_handler()
1252 return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt, gpa); in kvm_test_age_hva_handler()
1257 if (!kvm->arch.mmu.pgt) in kvm_age_hva()
1265 if (!kvm->arch.mmu.pgt) in kvm_test_age_hva()
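
Lines 1187-1265 are the MMU-notifier side. Every entry point first bails out when kvm->arch.mmu.pgt is NULL (no stage-2 table exists yet, or it has already been freed), and the per-range handlers then operate on that pgt: unmap, change-pte via kvm_pgtable_stage2_map(), and age/test-age via mkold/is_young. The unmap entry point, sketched from the matches; handle_hva_to_gpa() and the trace call are assumptions from the v5.11-era code:

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start,
                        unsigned long end, unsigned flags)
{
    if (!kvm->arch.mmu.pgt)
        return 0;  /* Nothing mapped at stage 2; nothing to do. */

    trace_kvm_unmap_hva_range(start, end);
    handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
    return 0;
}
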
1467 unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr, mem->memory_size); in kvm_arch_prepare_memory_region()
1486 kvm_free_stage2_pgd(&kvm->arch.mmu); in kvm_arch_flush_shadow_all()
1496 unmap_stage2_range(&kvm->arch.mmu, gpa, size); in kvm_arch_flush_shadow_memslot()
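
Lines 1467-1496 close the loop on memslot teardown: a slot being moved or deleted has its old range unmapped in kvm_arch_prepare_memory_region(), the whole stage-2 table is freed in kvm_arch_flush_shadow_all(), and a single slot is unmapped in kvm_arch_flush_shadow_memslot(). Sketch of the last of these; the gfn-to-gpa arithmetic and locking are assumptions from the v5.11-era code:

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
    gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
    phys_addr_t size = slot->npages << PAGE_SHIFT;

    spin_lock(&kvm->mmu_lock);
    unmap_stage2_range(&kvm->arch.mmu, gpa, size);
    spin_unlock(&kvm->mmu_lock);
}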