Lines matching refs:kvm (arm64 KVM stage-2 MMU code, arch/arm64/kvm/mmu.c)
42 static int stage2_apply_range(struct kvm *kvm, phys_addr_t addr, in stage2_apply_range() argument
51 struct kvm_pgtable *pgt = kvm->arch.mmu.pgt; in stage2_apply_range()
55 next = stage2_pgd_addr_end(kvm, addr, end); in stage2_apply_range()
61 cond_resched_lock(&kvm->mmu_lock); in stage2_apply_range()
67 #define stage2_apply_range_resched(kvm, addr, end, fn) \ argument
68 stage2_apply_range(kvm, addr, end, fn, true)
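The stage2_apply_range() lines above show the pattern used throughout this file: walk a guest-physical range one top-level (PGD-sized) chunk at a time, apply a page-table callback to each chunk, and optionally yield kvm->mmu_lock between chunks (the resched flag that stage2_apply_range_resched() sets to true). A minimal user-space sketch of that chunked walk follows; CHUNK_SIZE, walk_range() and print_chunk() are illustrative names, not kernel interfaces.

/*
 * Simplified, user-space sketch of the chunked range walk that
 * stage2_apply_range() performs.  CHUNK_SIZE, walk_range() and the
 * callback type are invented for illustration.
 */
#include <sched.h>
#include <stdint.h>
#include <stdio.h>

#define CHUNK_SIZE (1ULL << 30)	/* stand-in for one PGD's worth of IPA space */

typedef int (*range_fn)(uint64_t addr, uint64_t size);

static int walk_range(uint64_t addr, uint64_t end, range_fn fn, int may_yield)
{
	while (addr < end) {
		/* Round up to the next chunk boundary, clamped to end. */
		uint64_t next = (addr | (CHUNK_SIZE - 1)) + 1;
		int ret;

		if (next > end)
			next = end;

		ret = fn(addr, next - addr);
		if (ret)
			return ret;

		/*
		 * stage2_apply_range() optionally calls
		 * cond_resched_lock(&kvm->mmu_lock) at this point so a long
		 * walk does not hog the lock; sched_yield() is only a
		 * user-space stand-in for that yield point.
		 */
		if (may_yield && next != end)
			sched_yield();

		addr = next;
	}
	return 0;
}

static int print_chunk(uint64_t addr, uint64_t size)
{
	printf("chunk at 0x%llx, size 0x%llx\n",
	       (unsigned long long)addr, (unsigned long long)size);
	return 0;
}

int main(void)
{
	/* Walk 2.5 "PGDs" worth of address space, starting mid-chunk. */
	return walk_range(CHUNK_SIZE / 2, 3 * CHUNK_SIZE, print_chunk, 1);
}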
81 void kvm_flush_remote_tlbs(struct kvm *kvm) in kvm_flush_remote_tlbs() argument
83 kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu); in kvm_flush_remote_tlbs()
168 struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); in __unmap_stage2_range() local
171 assert_spin_locked(&kvm->mmu_lock); in __unmap_stage2_range()
173 WARN_ON(stage2_apply_range(kvm, start, end, kvm_pgtable_stage2_unmap, in __unmap_stage2_range()
182 static void stage2_flush_memslot(struct kvm *kvm, in stage2_flush_memslot() argument
188 stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_flush); in stage2_flush_memslot()
198 static void stage2_flush_vm(struct kvm *kvm) in stage2_flush_vm() argument
204 idx = srcu_read_lock(&kvm->srcu); in stage2_flush_vm()
205 spin_lock(&kvm->mmu_lock); in stage2_flush_vm()
207 slots = kvm_memslots(kvm); in stage2_flush_vm()
209 stage2_flush_memslot(kvm, memslot); in stage2_flush_vm()
211 spin_unlock(&kvm->mmu_lock); in stage2_flush_vm()
212 srcu_read_unlock(&kvm->srcu, idx); in stage2_flush_vm()
446 int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu) in kvm_init_stage2_mmu() argument
460 err = kvm_pgtable_stage2_init(pgt, &kvm->arch, &kvm_s2_mm_ops); in kvm_init_stage2_mmu()
473 mmu->arch = &kvm->arch; in kvm_init_stage2_mmu()
486 static void stage2_unmap_memslot(struct kvm *kvm, in stage2_unmap_memslot() argument
521 unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start); in stage2_unmap_memslot()
534 void stage2_unmap_vm(struct kvm *kvm) in stage2_unmap_vm() argument
540 idx = srcu_read_lock(&kvm->srcu); in stage2_unmap_vm()
542 spin_lock(&kvm->mmu_lock); in stage2_unmap_vm()
544 slots = kvm_memslots(kvm); in stage2_unmap_vm()
546 stage2_unmap_memslot(kvm, memslot); in stage2_unmap_vm()
548 spin_unlock(&kvm->mmu_lock); in stage2_unmap_vm()
550 srcu_read_unlock(&kvm->srcu, idx); in stage2_unmap_vm()
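stage2_flush_vm() and stage2_unmap_vm() above share one locking shape: hold the SRCU read lock so the memslot array stays alive, hold kvm->mmu_lock so the stage-2 tables cannot change underneath, and visit every memslot. Below is a rough user-space analogue of that shape; struct vm, struct slot and flush_vm() are made-up stand-ins, and a plain mutex stands in for both locks.

/*
 * User-space sketch of the "lock, walk every memslot, unlock" shape used by
 * stage2_flush_vm() and stage2_unmap_vm().  All names here are illustrative.
 */
#include <pthread.h>
#include <stdio.h>

struct slot { unsigned long base_gfn, npages; };

struct vm {
	pthread_mutex_t mmu_lock;	/* stands in for kvm->mmu_lock */
	struct slot slots[2];		/* stands in for kvm_memslots(kvm) */
	int nr_slots;
};

static void flush_slot(struct slot *s)
{
	printf("flush gfn %lu..%lu\n", s->base_gfn, s->base_gfn + s->npages);
}

static void flush_vm(struct vm *vm)
{
	/* In the kernel: idx = srcu_read_lock(&kvm->srcu); */
	pthread_mutex_lock(&vm->mmu_lock);

	for (int i = 0; i < vm->nr_slots; i++)
		flush_slot(&vm->slots[i]);

	pthread_mutex_unlock(&vm->mmu_lock);
	/* In the kernel: srcu_read_unlock(&kvm->srcu, idx); */
}

int main(void)
{
	struct vm vm = {
		.mmu_lock = PTHREAD_MUTEX_INITIALIZER,
		.slots = { { 0x1000, 16 }, { 0x8000, 32 } },
		.nr_slots = 2,
	};
	flush_vm(&vm);
	return 0;
}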
555 struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); in kvm_free_stage2_pgd() local
558 spin_lock(&kvm->mmu_lock); in kvm_free_stage2_pgd()
565 spin_unlock(&kvm->mmu_lock); in kvm_free_stage2_pgd()
582 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, in kvm_phys_addr_ioremap() argument
588 struct kvm_pgtable *pgt = kvm->arch.mmu.pgt; in kvm_phys_addr_ioremap()
598 kvm_mmu_cache_min_pages(kvm)); in kvm_phys_addr_ioremap()
602 spin_lock(&kvm->mmu_lock); in kvm_phys_addr_ioremap()
605 spin_unlock(&kvm->mmu_lock); in kvm_phys_addr_ioremap()
624 struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); in stage2_wp_range() local
625 stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_wrprotect); in stage2_wp_range()
641 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot) in kvm_mmu_wp_memory_region() argument
643 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_mmu_wp_memory_region()
653 spin_lock(&kvm->mmu_lock); in kvm_mmu_wp_memory_region()
654 stage2_wp_range(&kvm->arch.mmu, start, end); in kvm_mmu_wp_memory_region()
655 spin_unlock(&kvm->mmu_lock); in kvm_mmu_wp_memory_region()
656 kvm_flush_remote_tlbs(kvm); in kvm_mmu_wp_memory_region()
670 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, in kvm_mmu_write_protect_pt_masked() argument
678 stage2_wp_range(&kvm->arch.mmu, start, end); in kvm_mmu_write_protect_pt_masked()
688 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, in kvm_arch_mmu_enable_log_dirty_pt_masked() argument
692 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); in kvm_arch_mmu_enable_log_dirty_pt_masked()
833 struct kvm *kvm = vcpu->kvm; in user_mem_abort() local
914 kvm_mmu_cache_min_pages(kvm)); in user_mem_abort()
919 mmu_seq = vcpu->kvm->mmu_notifier_seq; in user_mem_abort()
931 pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable); in user_mem_abort()
953 spin_lock(&kvm->mmu_lock); in user_mem_abort()
955 if (mmu_notifier_retry(kvm, mmu_seq)) in user_mem_abort()
997 mark_page_dirty(kvm, gfn); in user_mem_abort()
1001 spin_unlock(&kvm->mmu_lock); in user_mem_abort()
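The user_mem_abort() lines above rely on the MMU-notifier retry protocol: snapshot kvm->mmu_notifier_seq before the sleepable gfn_to_pfn_prot() lookup, then, once kvm->mmu_lock is held, call mmu_notifier_retry() and abandon the mapping if an invalidation ran in between. The sketch below models that protocol with invented names (struct mmu, try_map_page()); the real code also places a read barrier between reading the sequence number and the pfn lookup.

/*
 * User-space model of the mmu_notifier_seq / mmu_notifier_retry() dance seen
 * in user_mem_abort() above.  The types and fields are illustrative only.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct mmu {
	pthread_mutex_t lock;		/* stands in for kvm->mmu_lock */
	unsigned long seq;		/* bumped by each invalidation */
	unsigned long in_progress;	/* non-zero while one is running */
};

static bool retry_needed(struct mmu *mmu, unsigned long snapshot)
{
	/* Must be called with mmu->lock held, like mmu_notifier_retry(). */
	return mmu->in_progress || mmu->seq != snapshot;
}

static int try_map_page(struct mmu *mmu)
{
	unsigned long snapshot = mmu->seq;	/* read before the slow path */

	/* ... slow, sleepable work such as gfn_to_pfn_prot() goes here ... */

	pthread_mutex_lock(&mmu->lock);
	if (retry_needed(mmu, snapshot)) {
		pthread_mutex_unlock(&mmu->lock);
		return -1;	/* caller retries the fault */
	}
	/* ... safe to install the new stage-2 mapping here ... */
	pthread_mutex_unlock(&mmu->lock);
	return 0;
}

int main(void)
{
	struct mmu mmu = { .lock = PTHREAD_MUTEX_INITIALIZER };
	printf("map attempt: %s\n", try_map_page(&mmu) ? "retry" : "ok");
	return 0;
}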
1016 spin_lock(&vcpu->kvm->mmu_lock); in handle_access_fault()
1019 spin_unlock(&vcpu->kvm->mmu_lock); in handle_access_fault()
1077 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_handle_guest_abort()
1080 memslot = gfn_to_memslot(vcpu->kvm, gfn); in kvm_handle_guest_abort()
1129 VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm)); in kvm_handle_guest_abort()
1146 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_handle_guest_abort()
1150 static int handle_hva_to_gpa(struct kvm *kvm, in handle_hva_to_gpa() argument
1153 int (*handler)(struct kvm *kvm, in handle_hva_to_gpa() argument
1162 slots = kvm_memslots(kvm); in handle_hva_to_gpa()
1176 ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data); in handle_hva_to_gpa()
1182 static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) in kvm_unmap_hva_handler() argument
1187 __unmap_stage2_range(&kvm->arch.mmu, gpa, size, may_block); in kvm_unmap_hva_handler()
1191 int kvm_unmap_hva_range(struct kvm *kvm, in kvm_unmap_hva_range() argument
1194 if (!kvm->arch.mmu.pgt) in kvm_unmap_hva_range()
1198 handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags); in kvm_unmap_hva_range()
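handle_hva_to_gpa() above is the bridge the MMU-notifier callbacks (unmap, set_spte, age, test_age) all go through: for each memslot, intersect the host-VA range with the slot's userspace mapping, translate the overlap to guest-physical, and invoke the handler. The stand-alone sketch below shows that intersection and translation; struct slot, for_each_overlap() and report() are illustrative only.

/*
 * Sketch of the per-memslot HVA-to-GPA dispatch that handle_hva_to_gpa()
 * performs for the lines above.  All names are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct slot {
	uint64_t userspace_addr;	/* host VA where the slot is mapped */
	uint64_t base_gfn;		/* first guest frame number */
	uint64_t npages;
};

typedef int (*hva_handler)(uint64_t gpa, uint64_t size);

static int for_each_overlap(struct slot *slots, int slot_count,
			    uint64_t start, uint64_t end, hva_handler handler)
{
	int ret = 0;

	for (int i = 0; i < slot_count; i++) {
		struct slot *s = &slots[i];
		uint64_t slot_end = s->userspace_addr + (s->npages << PAGE_SHIFT);
		uint64_t hva_start = start > s->userspace_addr ?
				     start : s->userspace_addr;
		uint64_t hva_end = end < slot_end ? end : slot_end;
		uint64_t gpa;

		if (hva_start >= hva_end)
			continue;	/* no overlap with this slot */

		/* Translate the overlapping host VA back to guest PA. */
		gpa = (s->base_gfn << PAGE_SHIFT) +
		      (hva_start - s->userspace_addr);

		ret |= handler(gpa, hva_end - hva_start);
	}
	return ret;
}

static int report(uint64_t gpa, uint64_t size)
{
	printf("handler: gpa 0x%llx size 0x%llx\n",
	       (unsigned long long)gpa, (unsigned long long)size);
	return 0;
}

int main(void)
{
	struct slot slots[] = {
		{ .userspace_addr = 0x400000, .base_gfn = 0x100, .npages = 16 },
		{ .userspace_addr = 0x800000, .base_gfn = 0x400, .npages = 64 },
	};
	return for_each_overlap(slots, 2, 0x401000, 0x803000, report);
}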
1202 static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) in kvm_set_spte_handler() argument
1214 kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, gpa, PAGE_SIZE, in kvm_set_spte_handler()
1219 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) in kvm_set_spte_hva() argument
1224 if (!kvm->arch.mmu.pgt) in kvm_set_spte_hva()
1234 handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pfn); in kvm_set_spte_hva()
1238 static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) in kvm_age_hva_handler() argument
1244 kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt, gpa); in kvm_age_hva_handler()
1249 static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) in kvm_test_age_hva_handler() argument
1252 return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt, gpa); in kvm_test_age_hva_handler()
1255 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) in kvm_age_hva() argument
1257 if (!kvm->arch.mmu.pgt) in kvm_age_hva()
1260 return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL); in kvm_age_hva()
1263 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) in kvm_test_age_hva() argument
1265 if (!kvm->arch.mmu.pgt) in kvm_test_age_hva()
1268 return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE, in kvm_test_age_hva()
1369 void kvm_arch_commit_memory_region(struct kvm *kvm, in kvm_arch_commit_memory_region() argument
1386 if (!kvm_dirty_log_manual_protect_and_init_set(kvm)) { in kvm_arch_commit_memory_region()
1387 kvm_mmu_wp_memory_region(kvm, mem->slot); in kvm_arch_commit_memory_region()
1392 int kvm_arch_prepare_memory_region(struct kvm *kvm, in kvm_arch_prepare_memory_region() argument
1410 if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT)) in kvm_arch_prepare_memory_region()
1453 ret = kvm_phys_addr_ioremap(kvm, gpa, pa, in kvm_arch_prepare_memory_region()
1465 spin_lock(&kvm->mmu_lock); in kvm_arch_prepare_memory_region()
1467 unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr, mem->memory_size); in kvm_arch_prepare_memory_region()
1469 stage2_flush_memslot(kvm, memslot); in kvm_arch_prepare_memory_region()
1470 spin_unlock(&kvm->mmu_lock); in kvm_arch_prepare_memory_region()
1476 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) in kvm_arch_free_memslot() argument
1480 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) in kvm_arch_memslots_updated() argument
1484 void kvm_arch_flush_shadow_all(struct kvm *kvm) in kvm_arch_flush_shadow_all() argument
1486 kvm_free_stage2_pgd(&kvm->arch.mmu); in kvm_arch_flush_shadow_all()
1489 void kvm_arch_flush_shadow_memslot(struct kvm *kvm, in kvm_arch_flush_shadow_memslot() argument
1495 spin_lock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
1496 unmap_stage2_range(&kvm->arch.mmu, gpa, size); in kvm_arch_flush_shadow_memslot()
1497 spin_unlock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
1544 stage2_flush_vm(vcpu->kvm); in kvm_set_way_flush()
1559 stage2_flush_vm(vcpu->kvm); in kvm_toggle_cache()