Lines Matching refs:kvm  (cross-reference listing; the functions below appear to come from the Linux kernel's Book3S HV nested-guest support, arch/powerpc/kvm/book3s_hv_nested.c)

232 	if (vcpu->kvm->arch.l1_ptcr == 0)  in kvmhv_enter_nested_guest()
241 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmhv_enter_nested_guest()
246 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in kvmhv_enter_nested_guest()
278 l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true); in kvmhv_enter_nested_guest()
348 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmhv_enter_nested_guest()
353 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in kvmhv_enter_nested_guest()
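The kvmhv_enter_nested_guest() references above show the H_ENTER_NESTED path refusing to run when L1 has not registered a partition table (l1_ptcr == 0) and bracketing every copy from L1 memory with the kvm->srcu read lock. A minimal sketch of that SRCU bracket, assuming the register-state pointer arrives in a GPR and that kvm_vcpu_read_guest() is the copy-in helper (the exact helpers and error paths in the real function may differ):

	struct hv_guest_state l2_hv;
	unsigned long hv_ptr = kvmppc_get_gpr(vcpu, 4);	/* assumed argument register */
	int rc;

	if (vcpu->kvm->arch.l1_ptcr == 0)
		return H_NOT_AVAILABLE;	/* no partition table registered yet */

	/* SRCU read-side critical section around the access to L1 memory. */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	rc = kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv, sizeof(l2_hv));
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	if (rc)
		return H_PARAMETER;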
454 void kvmhv_vm_nested_init(struct kvm *kvm) in kvmhv_vm_nested_init() argument
456 kvm->arch.max_nested_lpid = -1; in kvmhv_vm_nested_init()
466 struct kvm *kvm = vcpu->kvm; in kvmhv_set_partition_table() local
471 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmhv_set_partition_table()
477 !kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT)) in kvmhv_set_partition_table()
479 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmhv_set_partition_table()
481 kvm->arch.l1_ptcr = ptcr; in kvmhv_set_partition_table()
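kvmhv_set_partition_table() validates the PTCR value from L1 before caching it: the size field is bounded and the table base must lie in guest-visible memory, checked under kvm->srcu because it consults the memslots. A sketch, assuming the value arrives in GPR4 and that the size limit corresponds to the 4096-entry hardware maximum (both assumptions, not shown in the listing):

	struct kvm *kvm = vcpu->kvm;
	unsigned long ptcr = kvmppc_get_gpr(vcpu, 4);	/* assumed */
	long ret = H_SUCCESS;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	/* Reject an oversized table or a base page that L1 cannot see. */
	if ((ptcr & PRTS_MASK) > 12 - 8 ||	/* assumed 4096-entry limit */
	    !kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT))
		ret = H_PARAMETER;
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	if (ret == H_SUCCESS)
		kvm->arch.l1_ptcr = ptcr;
	return ret;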
517 gp = kvmhv_get_nested(vcpu->kvm, l1_lpid, false); in kvmhv_copy_tofrom_guest_nested()
533 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmhv_copy_tofrom_guest_nested()
535 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in kvmhv_copy_tofrom_guest_nested()
540 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmhv_copy_tofrom_guest_nested()
542 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in kvmhv_copy_tofrom_guest_nested()
573 struct kvm *kvm = gp->l1_host; in kvmhv_update_ptbl_cache() local
576 ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4); in kvmhv_update_ptbl_cache()
577 if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 8))) { in kvmhv_update_ptbl_cache()
578 int srcu_idx = srcu_read_lock(&kvm->srcu); in kvmhv_update_ptbl_cache()
579 ret = kvm_read_guest(kvm, ptbl_addr, in kvmhv_update_ptbl_cache()
581 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmhv_update_ptbl_cache()
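kvmhv_update_ptbl_cache() computes the guest-physical address of L1's partition-table entry for this nested LPID (entries are 16 bytes, hence the << 4), bounds-checks the LPID against the table size encoded in PRTS, and reads the entry under kvm->srcu. A sketch of the surrounding plumbing, assuming the cached fields are the two big-endian dwords of a struct patb_entry (an assumption; the listing only shows the compute/lock/read/unlock lines):

	struct patb_entry ptbl_entry;
	unsigned long ptbl_addr;
	int ret = -EFAULT;

	ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4);
	if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 8))) {
		int srcu_idx = srcu_read_lock(&kvm->srcu);

		ret = kvm_read_guest(kvm, ptbl_addr,
				     &ptbl_entry, sizeof(ptbl_entry));
		srcu_read_unlock(&kvm->srcu, srcu_idx);
	}
	if (ret) {
		/* Unreadable entry: treat the nested partition as empty. */
		gp->l1_gr_to_hr = 0;
		gp->process_table = 0;
	} else {
		gp->l1_gr_to_hr = be64_to_cpu(ptbl_entry.patb0);
		gp->process_table = be64_to_cpu(ptbl_entry.patb1);
	}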
593 static struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid) in kvmhv_alloc_nested() argument
601 gp->l1_host = kvm; in kvmhv_alloc_nested()
604 gp->shadow_pgtable = pgd_alloc(kvm->mm); in kvmhv_alloc_nested()
618 pgd_free(kvm->mm, gp->shadow_pgtable); in kvmhv_alloc_nested()
629 struct kvm *kvm = gp->l1_host; in kvmhv_release_nested() local
637 kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, in kvmhv_release_nested()
639 pgd_free(kvm->mm, gp->shadow_pgtable); in kvmhv_release_nested()
648 struct kvm *kvm = gp->l1_host; in kvmhv_remove_nested() local
652 spin_lock(&kvm->mmu_lock); in kvmhv_remove_nested()
653 if (gp == kvm->arch.nested_guests[lpid]) { in kvmhv_remove_nested()
654 kvm->arch.nested_guests[lpid] = NULL; in kvmhv_remove_nested()
655 if (lpid == kvm->arch.max_nested_lpid) { in kvmhv_remove_nested()
656 while (--lpid >= 0 && !kvm->arch.nested_guests[lpid]) in kvmhv_remove_nested()
658 kvm->arch.max_nested_lpid = lpid; in kvmhv_remove_nested()
663 spin_unlock(&kvm->mmu_lock); in kvmhv_remove_nested()
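kvmhv_remove_nested() detaches a guest from the kvm->arch.nested_guests[] array under kvm->mmu_lock and walks max_nested_lpid back down when the top slot empties. A sketch of how that plausibly ties into the refcount and release path (the refcnt handling and the final kvmhv_release_nested() call are assumptions consistent with the neighbouring functions, not shown in the listing):

	struct kvm *kvm = gp->l1_host;
	int lpid = gp->l1_lpid;
	long ref;

	spin_lock(&kvm->mmu_lock);
	if (gp == kvm->arch.nested_guests[lpid]) {
		kvm->arch.nested_guests[lpid] = NULL;
		if (lpid == kvm->arch.max_nested_lpid) {
			/* Skip back over any trailing empty slots. */
			while (--lpid >= 0 && !kvm->arch.nested_guests[lpid])
				;
			kvm->arch.max_nested_lpid = lpid;
		}
		--gp->refcnt;	/* assumed: drop the array's reference */
	}
	ref = gp->refcnt;
	spin_unlock(&kvm->mmu_lock);
	if (ref == 0)
		kvmhv_release_nested(gp);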
674 void kvmhv_release_all_nested(struct kvm *kvm) in kvmhv_release_all_nested() argument
682 spin_lock(&kvm->mmu_lock); in kvmhv_release_all_nested()
683 for (i = 0; i <= kvm->arch.max_nested_lpid; i++) { in kvmhv_release_all_nested()
684 gp = kvm->arch.nested_guests[i]; in kvmhv_release_all_nested()
687 kvm->arch.nested_guests[i] = NULL; in kvmhv_release_all_nested()
693 kvm->arch.max_nested_lpid = -1; in kvmhv_release_all_nested()
694 spin_unlock(&kvm->mmu_lock); in kvmhv_release_all_nested()
700 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmhv_release_all_nested()
701 kvm_for_each_memslot(memslot, kvm_memslots(kvm)) in kvmhv_release_all_nested()
703 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmhv_release_all_nested()
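kvmhv_release_all_nested() empties the whole array under mmu_lock, frees the guests outside the lock, then walks every memslot under kvm->srcu to drop the nested rmap state. A sketch, assuming the detached guests are chained through a ->next field into a free list and that a per-memslot helper (called kvmhv_free_memslot_nest_rmap() here, an assumed name) does the rmap cleanup:

	struct kvm_nested_guest *gp, *freelist = NULL;
	struct kvm_memory_slot *memslot;
	int i, srcu_idx;

	spin_lock(&kvm->mmu_lock);
	for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
		gp = kvm->arch.nested_guests[i];
		if (!gp)
			continue;
		kvm->arch.nested_guests[i] = NULL;
		gp->next = freelist;	/* assumed free-list link */
		freelist = gp;
	}
	kvm->arch.max_nested_lpid = -1;
	spin_unlock(&kvm->mmu_lock);

	/* Free the detached guests without holding mmu_lock. */
	while ((gp = freelist) != NULL) {
		freelist = gp->next;
		kvmhv_release_nested(gp);
	}

	srcu_idx = srcu_read_lock(&kvm->srcu);
	kvm_for_each_memslot(memslot, kvm_memslots(kvm))
		kvmhv_free_memslot_nest_rmap(memslot);	/* assumed helper */
	srcu_read_unlock(&kvm->srcu, srcu_idx);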
709 struct kvm *kvm = gp->l1_host; in kvmhv_flush_nested() local
711 spin_lock(&kvm->mmu_lock); in kvmhv_flush_nested()
712 kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid); in kvmhv_flush_nested()
713 spin_unlock(&kvm->mmu_lock); in kvmhv_flush_nested()
720 struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid, in kvmhv_get_nested() argument
726 l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4))) in kvmhv_get_nested()
729 spin_lock(&kvm->mmu_lock); in kvmhv_get_nested()
730 gp = kvm->arch.nested_guests[l1_lpid]; in kvmhv_get_nested()
733 spin_unlock(&kvm->mmu_lock); in kvmhv_get_nested()
738 newgp = kvmhv_alloc_nested(kvm, l1_lpid); in kvmhv_get_nested()
741 spin_lock(&kvm->mmu_lock); in kvmhv_get_nested()
742 if (kvm->arch.nested_guests[l1_lpid]) { in kvmhv_get_nested()
744 gp = kvm->arch.nested_guests[l1_lpid]; in kvmhv_get_nested()
746 kvm->arch.nested_guests[l1_lpid] = newgp; in kvmhv_get_nested()
750 if (l1_lpid > kvm->arch.max_nested_lpid) in kvmhv_get_nested()
751 kvm->arch.max_nested_lpid = l1_lpid; in kvmhv_get_nested()
754 spin_unlock(&kvm->mmu_lock); in kvmhv_get_nested()
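kvmhv_get_nested() is a lookup-or-create: look up under mmu_lock, and only if the slot is empty allocate a new kvm_nested_guest outside the lock, then re-check and either install it or discard it when another CPU raced in first. A sketch of that pattern built around the listed lines (the refcnt handling and the create flag are assumptions consistent with kvmhv_alloc_nested()/kvmhv_put_nested() above):

	struct kvm_nested_guest *gp, *newgp;

	if (l1_lpid >= KVM_MAX_NESTED_GUESTS ||
	    l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
		return NULL;

	spin_lock(&kvm->mmu_lock);
	gp = kvm->arch.nested_guests[l1_lpid];
	if (gp)
		++gp->refcnt;
	spin_unlock(&kvm->mmu_lock);

	if (gp || !create)
		return gp;

	/* Allocate without holding mmu_lock, then race to install. */
	newgp = kvmhv_alloc_nested(kvm, l1_lpid);
	if (!newgp)
		return NULL;

	spin_lock(&kvm->mmu_lock);
	if (kvm->arch.nested_guests[l1_lpid]) {
		/* Someone else beat us to it; use theirs, free ours below. */
		gp = kvm->arch.nested_guests[l1_lpid];
	} else {
		kvm->arch.nested_guests[l1_lpid] = newgp;
		++newgp->refcnt;	/* reference held by the array */
		gp = newgp;
		newgp = NULL;
		if (l1_lpid > kvm->arch.max_nested_lpid)
			kvm->arch.max_nested_lpid = l1_lpid;
	}
	++gp->refcnt;			/* reference returned to the caller */
	spin_unlock(&kvm->mmu_lock);

	if (newgp)
		kvmhv_release_nested(newgp);
	return gp;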
764 struct kvm *kvm = gp->l1_host; in kvmhv_put_nested() local
767 spin_lock(&kvm->mmu_lock); in kvmhv_put_nested()
769 spin_unlock(&kvm->mmu_lock); in kvmhv_put_nested()
774 static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid) in kvmhv_find_nested() argument
776 if (lpid > kvm->arch.max_nested_lpid) in kvmhv_find_nested()
778 return kvm->arch.nested_guests[lpid]; in kvmhv_find_nested()
781 pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid, in find_kvm_nested_guest_pte() argument
787 gp = kvmhv_find_nested(kvm, lpid); in find_kvm_nested_guest_pte()
791 VM_WARN(!spin_is_locked(&kvm->mmu_lock), in find_kvm_nested_guest_pte()
804 void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp, in kvmhv_insert_nest_rmap() argument
836 static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap, in kvmhv_update_nest_rmap_rc() argument
848 ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift); in kvmhv_update_nest_rmap_rc()
857 kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid); in kvmhv_update_nest_rmap_rc()
865 void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp, in kvmhv_update_nest_rmap_rc_list() argument
880 kvmhv_update_nest_rmap_rc(kvm, rmap, clr, set, hpa, mask); in kvmhv_update_nest_rmap_rc_list()
883 static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap, in kvmhv_remove_nest_rmap() argument
893 gp = kvmhv_find_nested(kvm, lpid); in kvmhv_remove_nest_rmap()
898 ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift); in kvmhv_remove_nest_rmap()
901 kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid); in kvmhv_remove_nest_rmap()
904 static void kvmhv_remove_nest_rmap_list(struct kvm *kvm, unsigned long *rmapp, in kvmhv_remove_nest_rmap_list() argument
912 kvmhv_remove_nest_rmap(kvm, rmap, hpa, mask); in kvmhv_remove_nest_rmap_list()
918 void kvmhv_remove_nest_rmap_range(struct kvm *kvm, in kvmhv_remove_nest_rmap_range() argument
936 kvmhv_remove_nest_rmap_list(kvm, rmap, hpa, addr_mask); in kvmhv_remove_nest_rmap_range()
959 struct kvm *kvm = vcpu->kvm; in kvmhv_invalidate_shadow_pte() local
964 spin_lock(&kvm->mmu_lock); in kvmhv_invalidate_shadow_pte()
965 ptep = find_kvm_nested_guest_pte(kvm, gp->l1_lpid, gpa, &shift); in kvmhv_invalidate_shadow_pte()
969 kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid); in kvmhv_invalidate_shadow_pte()
972 spin_unlock(&kvm->mmu_lock); in kvmhv_invalidate_shadow_pte()
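kvmhv_invalidate_shadow_pte() tears down a single shadow translation: look up the PTE for this nested guest under mmu_lock, unmap it if present, and hand the mapping's page-size shift back so the caller can skip the rest of a large page. A sketch (the shift_ret out-parameter and boolean return are assumptions inferred from the visible lines):

	struct kvm *kvm = vcpu->kvm;
	bool ret = false;
	pte_t *ptep;
	int shift;

	spin_lock(&kvm->mmu_lock);
	ptep = find_kvm_nested_guest_pte(kvm, gp->l1_lpid, gpa, &shift);
	if (!shift)
		shift = PAGE_SHIFT;
	if (ptep && pte_present(*ptep)) {
		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
		ret = true;
	}
	spin_unlock(&kvm->mmu_lock);

	if (shift_ret)
		*shift_ret = shift;	/* assumed out-parameter */
	return ret;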
1017 struct kvm *kvm = vcpu->kvm; in kvmhv_emulate_tlbie_tlb_addr() local
1032 gp = kvmhv_get_nested(kvm, lpid, false); in kvmhv_emulate_tlbie_tlb_addr()
1053 struct kvm *kvm = vcpu->kvm; in kvmhv_emulate_tlbie_lpid() local
1059 spin_lock(&kvm->mmu_lock); in kvmhv_emulate_tlbie_lpid()
1060 kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, in kvmhv_emulate_tlbie_lpid()
1063 spin_unlock(&kvm->mmu_lock); in kvmhv_emulate_tlbie_lpid()
1083 struct kvm *kvm = vcpu->kvm; in kvmhv_emulate_tlbie_all_lpid() local
1087 spin_lock(&kvm->mmu_lock); in kvmhv_emulate_tlbie_all_lpid()
1088 for (i = 0; i <= kvm->arch.max_nested_lpid; i++) { in kvmhv_emulate_tlbie_all_lpid()
1089 gp = kvm->arch.nested_guests[i]; in kvmhv_emulate_tlbie_all_lpid()
1091 spin_unlock(&kvm->mmu_lock); in kvmhv_emulate_tlbie_all_lpid()
1093 spin_lock(&kvm->mmu_lock); in kvmhv_emulate_tlbie_all_lpid()
1096 spin_unlock(&kvm->mmu_lock); in kvmhv_emulate_tlbie_all_lpid()
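kvmhv_emulate_tlbie_all_lpid() iterates the cached nested guests under mmu_lock, but drops and retakes the lock around each per-LPID flush, since freeing a shadow page table is too heavy to do with the spinlock held. A sketch of that drop-and-retake loop (the per-guest call to kvmhv_emulate_tlbie_lpid() is an assumption based on the function listed just above):

	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;
	int i;

	spin_lock(&kvm->mmu_lock);
	for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
		gp = kvm->arch.nested_guests[i];
		if (gp) {
			/* Drop the lock for the heavy per-guest flush. */
			spin_unlock(&kvm->mmu_lock);
			kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
			spin_lock(&kvm->mmu_lock);
		}
	}
	spin_unlock(&kvm->mmu_lock);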
1102 struct kvm *kvm = vcpu->kvm; in kvmhv_emulate_priv_tlbie() local
1139 gp = kvmhv_get_nested(kvm, lpid, false); in kvmhv_emulate_priv_tlbie()
1242 struct kvm *kvm = vcpu->kvm; in kvmhv_handle_nested_set_rc() local
1254 spin_lock(&kvm->mmu_lock); in kvmhv_handle_nested_set_rc()
1256 ret = kvmppc_hv_handle_set_rc(kvm, false, writing, in kvmhv_handle_nested_set_rc()
1257 gpte.raddr, kvm->arch.lpid); in kvmhv_handle_nested_set_rc()
1264 ret = kvmppc_hv_handle_set_rc(kvm, true, writing, in kvmhv_handle_nested_set_rc()
1272 spin_unlock(&kvm->mmu_lock); in kvmhv_handle_nested_set_rc()
1304 struct kvm *kvm = vcpu->kvm; in __kvmhv_nested_page_fault() local
1375 memslot = gfn_to_memslot(kvm, gfn); in __kvmhv_nested_page_fault()
1399 mmu_seq = kvm->mmu_notifier_seq; in __kvmhv_nested_page_fault()
1404 spin_lock(&kvm->mmu_lock); in __kvmhv_nested_page_fault()
1405 pte_p = find_kvm_secondary_pte(kvm, gpa, &shift); in __kvmhv_nested_page_fault()
1410 spin_unlock(&kvm->mmu_lock); in __kvmhv_nested_page_fault()
1457 ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level, in __kvmhv_nested_page_fault()
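The __kvmhv_nested_page_fault() lines show the standard KVM MMU-notifier race guard: snapshot kvm->mmu_notifier_seq before resolving the host page, look up the L1 (secondary) PTE under mmu_lock, and pass the snapshot into kvmppc_create_pte(), which is expected to recheck it under mmu_lock and bail out if an invalidation slipped in between. A sketch of that sequence (the trailing arguments to kvmppc_create_pte() and the -EAGAIN handling are assumptions; the listing truncates the call):

	/* Snapshot the notifier sequence before touching the host page. */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* ... translate gpa via the L1 page table, build the shadow pte ... */

	ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level,
				mmu_seq, gp->shadow_lpid, rmapp, &n_rmap);
	if (ret == -EAGAIN)
		ret = RESUME_GUEST;	/* mmu_seq moved: let the guest retry */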
1481 int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid) in kvmhv_nested_next_lpid() argument
1485 spin_lock(&kvm->mmu_lock); in kvmhv_nested_next_lpid()
1486 while (++lpid <= kvm->arch.max_nested_lpid) { in kvmhv_nested_next_lpid()
1487 if (kvm->arch.nested_guests[lpid]) { in kvmhv_nested_next_lpid()
1492 spin_unlock(&kvm->mmu_lock); in kvmhv_nested_next_lpid()
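kvmhv_nested_next_lpid() is nearly complete in the listing: it scans upward from the given LPID under mmu_lock and returns the next populated slot. A sketch filling in the return plumbing the listing omits (the ret variable is an assumption):

int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid)
{
	int ret = -1;

	spin_lock(&kvm->mmu_lock);
	while (++lpid <= kvm->arch.max_nested_lpid) {
		if (kvm->arch.nested_guests[lpid]) {
			ret = lpid;
			break;
		}
	}
	spin_unlock(&kvm->mmu_lock);
	return ret;
}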