Lines matching refs:kvm

231 	struct kvm *kvm;  member
246 int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot) in kvmppc_uvmem_slot_init() argument
261 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_slot_init()
262 list_add(&p->list, &kvm->arch.uvmem_pfns); in kvmppc_uvmem_slot_init()
263 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_slot_init()
271 void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot) in kvmppc_uvmem_slot_free() argument
275 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_slot_free()
276 list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) { in kvmppc_uvmem_slot_free()
284 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_slot_free()
287 static void kvmppc_mark_gfn(unsigned long gfn, struct kvm *kvm, in kvmppc_mark_gfn() argument
292 list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) { in kvmppc_mark_gfn()
307 unsigned long uvmem_pfn, struct kvm *kvm) in kvmppc_gfn_secure_uvmem_pfn() argument
309 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_UVMEM_PFN, uvmem_pfn); in kvmppc_gfn_secure_uvmem_pfn()
313 static void kvmppc_gfn_secure_mem_pfn(unsigned long gfn, struct kvm *kvm) in kvmppc_gfn_secure_mem_pfn() argument
315 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_MEM_PFN, 0); in kvmppc_gfn_secure_mem_pfn()
319 static void kvmppc_gfn_shared(unsigned long gfn, struct kvm *kvm) in kvmppc_gfn_shared() argument
321 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_SHARED, 0); in kvmppc_gfn_shared()
325 static void kvmppc_gfn_remove(unsigned long gfn, struct kvm *kvm) in kvmppc_gfn_remove() argument
327 kvmppc_mark_gfn(gfn, kvm, 0, 0); in kvmppc_gfn_remove()
331 static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm, in kvmppc_gfn_is_uvmem_pfn() argument
336 list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) { in kvmppc_gfn_is_uvmem_pfn()
360 struct kvm *kvm, unsigned long *gfn) in kvmppc_next_nontransitioned_gfn() argument
366 list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list) in kvmppc_next_nontransitioned_gfn()
389 static int kvmppc_memslot_page_merge(struct kvm *kvm, in kvmppc_memslot_page_merge() argument
393 unsigned long end, start = gfn_to_hva(kvm, gfn); in kvmppc_memslot_page_merge()
403 mmap_write_lock(kvm->mm); in kvmppc_memslot_page_merge()
405 vma = find_vma_intersection(kvm->mm, start, end); in kvmppc_memslot_page_merge()
419 mmap_write_unlock(kvm->mm); in kvmppc_memslot_page_merge()
423 static void __kvmppc_uvmem_memslot_delete(struct kvm *kvm, in __kvmppc_uvmem_memslot_delete() argument
426 uv_unregister_mem_slot(kvm->arch.lpid, memslot->id); in __kvmppc_uvmem_memslot_delete()
427 kvmppc_uvmem_slot_free(kvm, memslot); in __kvmppc_uvmem_memslot_delete()
428 kvmppc_memslot_page_merge(kvm, memslot, true); in __kvmppc_uvmem_memslot_delete()
431 static int __kvmppc_uvmem_memslot_create(struct kvm *kvm, in __kvmppc_uvmem_memslot_create() argument
436 if (kvmppc_memslot_page_merge(kvm, memslot, false)) in __kvmppc_uvmem_memslot_create()
439 if (kvmppc_uvmem_slot_init(kvm, memslot)) in __kvmppc_uvmem_memslot_create()
442 ret = uv_register_mem_slot(kvm->arch.lpid, in __kvmppc_uvmem_memslot_create()
452 kvmppc_uvmem_slot_free(kvm, memslot); in __kvmppc_uvmem_memslot_create()
454 kvmppc_memslot_page_merge(kvm, memslot, true); in __kvmppc_uvmem_memslot_create()
458 unsigned long kvmppc_h_svm_init_start(struct kvm *kvm) in kvmppc_h_svm_init_start() argument
465 kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START; in kvmppc_h_svm_init_start()
471 if (!kvm_is_radix(kvm)) in kvmppc_h_svm_init_start()
475 if (!kvm->arch.svm_enabled) in kvmppc_h_svm_init_start()
478 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_h_svm_init_start()
481 slots = kvm_memslots(kvm); in kvmppc_h_svm_init_start()
483 ret = __kvmppc_uvmem_memslot_create(kvm, memslot); in kvmppc_h_svm_init_start()
489 slots = kvm_memslots(kvm); in kvmppc_h_svm_init_start()
493 __kvmppc_uvmem_memslot_delete(kvm, memslot); in kvmppc_h_svm_init_start()
497 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_h_svm_init_start()
509 struct kvm *kvm, unsigned long gpa) in __kvmppc_svm_page_out() argument
528 if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL)) in __kvmppc_svm_page_out()
560 ret = uv_page_out(kvm->arch.lpid, pfn << page_shift, in __kvmppc_svm_page_out()
581 struct kvm *kvm, unsigned long gpa) in kvmppc_svm_page_out() argument
585 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_svm_page_out()
586 ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa); in kvmppc_svm_page_out()
587 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_svm_page_out()
601 struct kvm *kvm, bool skip_page_out) in kvmppc_uvmem_drop_pages() argument
610 mmap_read_lock(kvm->mm); in kvmppc_uvmem_drop_pages()
619 vma = find_vma_intersection(kvm->mm, addr, addr+1); in kvmppc_uvmem_drop_pages()
626 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_drop_pages()
628 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) { in kvmppc_uvmem_drop_pages()
635 PAGE_SHIFT, kvm, pvt->gpa)) in kvmppc_uvmem_drop_pages()
640 kvmppc_gfn_remove(gfn, kvm); in kvmppc_uvmem_drop_pages()
643 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_drop_pages()
646 mmap_read_unlock(kvm->mm); in kvmppc_uvmem_drop_pages()
649 unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm) in kvmppc_h_svm_init_abort() argument
658 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmppc_h_svm_init_abort()
661 if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) in kvmppc_h_svm_init_abort()
664 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_h_svm_init_abort()
666 kvm_for_each_memslot(memslot, kvm_memslots(kvm)) in kvmppc_h_svm_init_abort()
667 kvmppc_uvmem_drop_pages(memslot, kvm, false); in kvmppc_h_svm_init_abort()
669 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_h_svm_init_abort()
671 kvm->arch.secure_guest = 0; in kvmppc_h_svm_init_abort()
672 uv_svm_terminate(kvm->arch.lpid); in kvmppc_h_svm_init_abort()
685 static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm) in kvmppc_uvmem_get_page() argument
709 kvmppc_gfn_secure_uvmem_pfn(gpa >> PAGE_SHIFT, uvmem_pfn, kvm); in kvmppc_uvmem_get_page()
712 pvt->kvm = kvm; in kvmppc_uvmem_get_page()
733 unsigned long end, unsigned long gpa, struct kvm *kvm, in kvmppc_svm_page_in() argument
761 dpage = kvmppc_uvmem_get_page(gpa, kvm); in kvmppc_svm_page_in()
771 ret = uv_page_in(kvm->arch.lpid, pfn << page_shift, in kvmppc_svm_page_in()
785 static int kvmppc_uv_migrate_mem_slot(struct kvm *kvm, in kvmppc_uv_migrate_mem_slot() argument
793 mmap_read_lock(kvm->mm); in kvmppc_uv_migrate_mem_slot()
794 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_uv_migrate_mem_slot()
795 while (kvmppc_next_nontransitioned_gfn(memslot, kvm, &gfn)) { in kvmppc_uv_migrate_mem_slot()
797 start = gfn_to_hva(kvm, gfn); in kvmppc_uv_migrate_mem_slot()
802 vma = find_vma_intersection(kvm->mm, start, end); in kvmppc_uv_migrate_mem_slot()
807 (gfn << PAGE_SHIFT), kvm, PAGE_SHIFT, false); in kvmppc_uv_migrate_mem_slot()
816 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_uv_migrate_mem_slot()
817 mmap_read_unlock(kvm->mm); in kvmppc_uv_migrate_mem_slot()
821 unsigned long kvmppc_h_svm_init_done(struct kvm *kvm) in kvmppc_h_svm_init_done() argument
828 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmppc_h_svm_init_done()
832 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_h_svm_init_done()
833 slots = kvm_memslots(kvm); in kvmppc_h_svm_init_done()
835 ret = kvmppc_uv_migrate_mem_slot(kvm, memslot); in kvmppc_h_svm_init_done()
851 kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE; in kvmppc_h_svm_init_done()
852 pr_info("LPID %d went secure\n", kvm->arch.lpid); in kvmppc_h_svm_init_done()
855 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_h_svm_init_done()
868 static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa, in kvmppc_share_page() argument
880 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_share_page()
881 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_share_page()
882 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) { in kvmppc_share_page()
894 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_share_page()
895 pfn = gfn_to_pfn(kvm, gfn); in kvmppc_share_page()
899 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_share_page()
900 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) { in kvmppc_share_page()
909 if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0, in kvmppc_share_page()
911 kvmppc_gfn_shared(gfn, kvm); in kvmppc_share_page()
915 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_share_page()
917 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_share_page()
927 unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa, in kvmppc_h_svm_page_in() argument
937 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmppc_h_svm_page_in()
947 return kvmppc_share_page(kvm, gpa, page_shift); in kvmppc_h_svm_page_in()
950 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_h_svm_page_in()
951 mmap_read_lock(kvm->mm); in kvmppc_h_svm_page_in()
953 start = gfn_to_hva(kvm, gfn); in kvmppc_h_svm_page_in()
957 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_h_svm_page_in()
959 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL)) in kvmppc_h_svm_page_in()
963 vma = find_vma_intersection(kvm->mm, start, end); in kvmppc_h_svm_page_in()
967 if (kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift, in kvmppc_h_svm_page_in()
974 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_h_svm_page_in()
976 mmap_read_unlock(kvm->mm); in kvmppc_h_svm_page_in()
977 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_h_svm_page_in()
996 pvt->kvm, pvt->gpa)) in kvmppc_uvmem_migrate_to_ram()
1022 kvmppc_gfn_remove(pvt->gpa >> PAGE_SHIFT, pvt->kvm); in kvmppc_uvmem_page_free()
1024 kvmppc_gfn_secure_mem_pfn(pvt->gpa >> PAGE_SHIFT, pvt->kvm); in kvmppc_uvmem_page_free()
1037 kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa, in kvmppc_h_svm_page_out() argument
1046 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmppc_h_svm_page_out()
1056 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_h_svm_page_out()
1057 mmap_read_lock(kvm->mm); in kvmppc_h_svm_page_out()
1058 start = gfn_to_hva(kvm, gfn); in kvmppc_h_svm_page_out()
1063 vma = find_vma_intersection(kvm->mm, start, end); in kvmppc_h_svm_page_out()
1067 if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa)) in kvmppc_h_svm_page_out()
1070 mmap_read_unlock(kvm->mm); in kvmppc_h_svm_page_out()
1071 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_h_svm_page_out()
1075 int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn) in kvmppc_send_page_to_uv() argument
1080 pfn = gfn_to_pfn(kvm, gfn); in kvmppc_send_page_to_uv()
1084 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_send_page_to_uv()
1085 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL)) in kvmppc_send_page_to_uv()
1088 ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT, in kvmppc_send_page_to_uv()
1092 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_send_page_to_uv()
1096 int kvmppc_uvmem_memslot_create(struct kvm *kvm, const struct kvm_memory_slot *new) in kvmppc_uvmem_memslot_create() argument
1098 int ret = __kvmppc_uvmem_memslot_create(kvm, new); in kvmppc_uvmem_memslot_create()
1101 ret = kvmppc_uv_migrate_mem_slot(kvm, new); in kvmppc_uvmem_memslot_create()
1106 void kvmppc_uvmem_memslot_delete(struct kvm *kvm, const struct kvm_memory_slot *old) in kvmppc_uvmem_memslot_delete() argument
1108 __kvmppc_uvmem_memslot_delete(kvm, old); in kvmppc_uvmem_memslot_delete()