Lines matching refs:memslot. Each hit shows the source line number, the matching code, and the enclosing function, with memslot tagged as an argument or a local.
70 static bool memslot_is_logging(struct kvm_memory_slot *memslot) in memslot_is_logging() argument
72 return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY); in memslot_is_logging()
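The two matches above are effectively the whole helper: a slot has dirty logging active when a dirty bitmap has been allocated for it and the slot is not read-only. Reassembled, with nothing beyond the matched lines assumed:

    static bool memslot_is_logging(struct kvm_memory_slot *memslot)
    {
            /* logging is on iff a dirty bitmap exists and the slot is writable */
            return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
    }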
183 struct kvm_memory_slot *memslot) in stage2_flush_memslot() argument
185 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; in stage2_flush_memslot()
186 phys_addr_t end = addr + PAGE_SIZE * memslot->npages; in stage2_flush_memslot()
201 struct kvm_memory_slot *memslot; in stage2_flush_vm() local
208 kvm_for_each_memslot(memslot, slots) in stage2_flush_vm()
209 stage2_flush_memslot(kvm, memslot); in stage2_flush_vm()
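stage2_flush_memslot() shows the standard way a slot is turned into a guest-physical (IPA) range: base_gfn shifted by PAGE_SHIFT gives the start, npages pages give the length, and stage2_flush_vm() simply applies that per slot via kvm_for_each_memslot(). A sketch of the arithmetic; the stage-2 table walk that performs the actual flush is not among the matches and is elided:

    static void stage2_flush_memslot(struct kvm *kvm,
                                     struct kvm_memory_slot *memslot)
    {
            /* IPA range backed by this slot: [base_gfn, base_gfn + npages) pages */
            phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
            phys_addr_t end  = addr + PAGE_SIZE * memslot->npages;

            /* ... walk the stage-2 tables and clean/flush [addr, end) ... */
    }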
487 struct kvm_memory_slot *memslot) in stage2_unmap_memslot() argument
489 hva_t hva = memslot->userspace_addr; in stage2_unmap_memslot()
490 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; in stage2_unmap_memslot()
491 phys_addr_t size = PAGE_SIZE * memslot->npages; in stage2_unmap_memslot()
520 gpa_t gpa = addr + (vm_start - memslot->userspace_addr); in stage2_unmap_memslot()
537 struct kvm_memory_slot *memslot; in stage2_unmap_vm() local
545 kvm_for_each_memslot(memslot, slots) in stage2_unmap_vm()
546 stage2_unmap_memslot(kvm, memslot); in stage2_unmap_vm()
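stage2_unmap_memslot() works from the other side of the mapping: it takes the slot's userspace window (userspace_addr, npages pages) and, for each VMA inside that window, translates the VMA start back into an IPA by its offset from userspace_addr. The matched lines pulled together; vm_start comes from the elided VMA walk:

    hva_t       hva  = memslot->userspace_addr;
    phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
    phys_addr_t size = PAGE_SIZE * memslot->npages;

    /* for a VMA starting at vm_start within [hva, hva + size): */
    gpa_t gpa = addr + (vm_start - memslot->userspace_addr);

    /* stage2_unmap_vm() then repeats this for every slot via kvm_for_each_memslot() */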
644 struct kvm_memory_slot *memslot = id_to_memslot(slots, slot); in kvm_mmu_wp_memory_region() local
647 if (WARN_ON_ONCE(!memslot)) in kvm_mmu_wp_memory_region()
650 start = memslot->base_gfn << PAGE_SHIFT; in kvm_mmu_wp_memory_region()
651 end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; in kvm_mmu_wp_memory_region()
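kvm_mmu_wp_memory_region() resolves a slot id back to its memslot with id_to_memslot() and derives the IPA range to write-protect from the same base_gfn/npages pair; here the end is computed by shifting the summed frame count rather than adding a byte length, which is equivalent. Reassembled from the matches; the early return after the WARN_ON_ONCE() and the write-protect walk itself are assumed/elided:

    struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
    phys_addr_t start, end;

    if (WARN_ON_ONCE(!memslot))
            return;

    start = memslot->base_gfn << PAGE_SHIFT;
    end   = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
    /* ... write-protect the stage-2 entries covering [start, end) ... */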
710 static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot, in fault_supports_stage2_huge_mapping() argument
722 size = memslot->npages * PAGE_SIZE; in fault_supports_stage2_huge_mapping()
724 gpa_start = memslot->base_gfn << PAGE_SHIFT; in fault_supports_stage2_huge_mapping()
726 uaddr_start = memslot->userspace_addr; in fault_supports_stage2_huge_mapping()
780 transparent_hugepage_adjust(struct kvm_memory_slot *memslot, in transparent_hugepage_adjust() argument
792 fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) { in transparent_hugepage_adjust()
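fault_supports_stage2_huge_mapping() reads the slot geometry shown above to decide whether a block mapping of a given size is safe: the guest-physical start and the userspace start must be congruent modulo the block size, and the block containing the faulting hva must lie entirely inside the slot. transparent_hugepage_adjust() applies the same check for PMD_SIZE before promoting a THP-backed page. A hedged sketch of the check; uaddr_end, map_size and the return expression are not among the matches and are reconstructed:

    size        = memslot->npages * PAGE_SIZE;
    gpa_start   = memslot->base_gfn << PAGE_SHIFT;
    uaddr_start = memslot->userspace_addr;
    uaddr_end   = uaddr_start + size;

    /* IPA and userspace VA must share the same offset inside a map_size block */
    if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
            return false;

    /* and the block around hva must be fully covered by the slot */
    return (hva & ~(map_size - 1)) >= uaddr_start &&
           (hva & ~(map_size - 1)) + map_size <= uaddr_end;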
825 struct kvm_memory_slot *memslot, unsigned long hva, in user_mem_abort() argument
839 bool logging_active = memslot_is_logging(memslot); in user_mem_abort()
878 if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE)) in user_mem_abort()
886 if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) in user_mem_abort()
963 vma_pagesize = transparent_hugepage_adjust(memslot, hva, in user_mem_abort()
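user_mem_abort() is where the logging flag and the huge-mapping check come together: when dirty logging is active the fault is mapped at page granularity so writes stay trackable, otherwise PUD_SIZE and then PMD_SIZE block mappings are tried, and a remaining PAGE_SIZE mapping may still be widened by transparent_hugepage_adjust(). A simplified sketch of that selection; the VMA-derived constraints and the &pfn/&fault_ipa arguments are assumptions, and the real function has more cases:

    bool logging_active = memslot_is_logging(memslot);
    long vma_pagesize = PAGE_SIZE;

    if (!logging_active &&
        fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
            vma_pagesize = PUD_SIZE;
    else if (!logging_active &&
             fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
            vma_pagesize = PMD_SIZE;

    if (vma_pagesize == PAGE_SIZE && !logging_active)
            /* try to widen a 4K mapping backed by a transparent huge page */
            vma_pagesize = transparent_hugepage_adjust(memslot, hva,
                                                       &pfn, &fault_ipa);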
1041 struct kvm_memory_slot *memslot; in kvm_handle_guest_abort() local
1080 memslot = gfn_to_memslot(vcpu->kvm, gfn); in kvm_handle_guest_abort()
1081 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable); in kvm_handle_guest_abort()
1137 ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status); in kvm_handle_guest_abort()
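kvm_handle_guest_abort() is the caller that produces the memslot and hva consumed by user_mem_abort(): the faulting IPA is converted to a gfn, the gfn is looked up in the memslots, and the slot plus its writability flag yield the userspace address to fault in. Roughly, with the gfn computation assumed from context:

    gfn = fault_ipa >> PAGE_SHIFT;                  /* faulting IPA -> guest frame */
    memslot = gfn_to_memslot(vcpu->kvm, gfn);       /* slot backing that frame     */
    hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);

    /* ... MMIO and error handling elided ... */
    ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);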
1159 struct kvm_memory_slot *memslot; in handle_hva_to_gpa() local
1165 kvm_for_each_memslot(memslot, slots) { in handle_hva_to_gpa()
1169 hva_start = max(start, memslot->userspace_addr); in handle_hva_to_gpa()
1170 hva_end = min(end, memslot->userspace_addr + in handle_hva_to_gpa()
1171 (memslot->npages << PAGE_SHIFT)); in handle_hva_to_gpa()
1175 gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT; in handle_hva_to_gpa()
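handle_hva_to_gpa() runs notifier-style callbacks over a host-virtual range: for every slot it clamps [start, end) to the slot's userspace window, skips slots with no overlap, and converts the clamped start back to a guest-physical address with hva_to_gfn_memslot(). Sketch; the handler invocation at the end is an assumption about the elided lines:

    kvm_for_each_memslot(memslot, slots) {
            unsigned long hva_start, hva_end;
            gpa_t gpa;

            /* clamp the requested hva range to this slot's userspace window */
            hva_start = max(start, memslot->userspace_addr);
            hva_end = min(end, memslot->userspace_addr +
                               (memslot->npages << PAGE_SHIFT));
            if (hva_start >= hva_end)
                    continue;

            /* translate the clamped start back into guest-physical space */
            gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
            ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
    }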
1393 struct kvm_memory_slot *memslot, in kvm_arch_prepare_memory_region() argument
1410 if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT)) in kvm_arch_prepare_memory_region()
1448 if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) { in kvm_arch_prepare_memory_region()
1469 stage2_flush_memslot(kvm, memslot); in kvm_arch_prepare_memory_region()
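Finally, kvm_arch_prepare_memory_region() validates a new or changed slot against the guest's IPA space; the KVM_MEM_LOG_DIRTY_PAGES test and the closing stage2_flush_memslot() call sit inside logic that the matches do not show. The bounds check, sketched, with the -EFAULT return value assumed:

    /* the slot must fit entirely inside the guest's IPA space */
    if ((memslot->base_gfn + memslot->npages) >
        (kvm_phys_size(kvm) >> PAGE_SHIFT))
            return -EFAULT;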