Lines Matching refs:kvm
108 static int sev_get_asid(struct kvm *kvm) in sev_get_asid() argument
110 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; in sev_get_asid()
150 static void sev_unbind_asid(struct kvm *kvm, unsigned int handle) in sev_unbind_asid() argument
174 static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) in sev_guest_init() argument
176 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; in sev_guest_init()
179 if (kvm->created_vcpus) in sev_guest_init()
205 static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error) in sev_bind_asid() argument
208 int asid = sev_get_asid(kvm); in sev_bind_asid()
239 static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error) in sev_issue_cmd() argument
241 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; in sev_issue_cmd()
246 static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp) in sev_launch_start() argument
248 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; in sev_launch_start()
255 if (!sev_guest(kvm)) in sev_launch_start()
298 ret = sev_bind_asid(kvm, start->handle, error); in sev_launch_start()
307 sev_unbind_asid(kvm, start->handle); in sev_launch_start()
324 static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, in sev_pin_memory() argument
328 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; in sev_pin_memory()
336 lockdep_assert_held(&kvm->lock); in sev_pin_memory()
387 static void sev_unpin_memory(struct kvm *kvm, struct page **pages, in sev_unpin_memory() argument
390 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; in sev_unpin_memory()
434 static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) in sev_launch_update_data() argument
437 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; in sev_launch_update_data()
443 if (!sev_guest(kvm)) in sev_launch_update_data()
458 inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1); in sev_launch_update_data()
487 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error); in sev_launch_update_data()
502 sev_unpin_memory(kvm, inpages, npages); in sev_launch_update_data()
508 static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp) in sev_launch_measure() argument
511 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; in sev_launch_measure()
518 if (!sev_guest(kvm)) in sev_launch_measure()
550 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error); in sev_launch_measure()
577 static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp) in sev_launch_finish() argument
579 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; in sev_launch_finish()
583 if (!sev_guest(kvm)) in sev_launch_finish()
591 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error); in sev_launch_finish()
597 static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp) in sev_guest_status() argument
599 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; in sev_guest_status()
604 if (!sev_guest(kvm)) in sev_guest_status()
612 ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error); in sev_guest_status()
627 static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src, in __sev_issue_dbg_cmd() argument
631 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; in __sev_issue_dbg_cmd()
644 ret = sev_issue_cmd(kvm, in __sev_issue_dbg_cmd()
651 static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr, in __sev_dbg_decrypt() argument
664 return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false); in __sev_dbg_decrypt()
667 static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr, in __sev_dbg_decrypt_user() argument
686 ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err); in __sev_dbg_decrypt_user()
704 static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr, in __sev_dbg_encrypt_user() argument
744 ret = __sev_dbg_decrypt(kvm, dst_paddr, in __sev_dbg_encrypt_user()
771 ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true); in __sev_dbg_encrypt_user()
781 static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) in sev_dbg_crypt() argument
791 if (!sev_guest(kvm)) in sev_dbg_crypt()
811 src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0); in sev_dbg_crypt()
815 dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1); in sev_dbg_crypt()
817 sev_unpin_memory(kvm, src_p, n); in sev_dbg_crypt()
838 ret = __sev_dbg_decrypt_user(kvm, in sev_dbg_crypt()
844 ret = __sev_dbg_encrypt_user(kvm, in sev_dbg_crypt()
851 sev_unpin_memory(kvm, src_p, n); in sev_dbg_crypt()
852 sev_unpin_memory(kvm, dst_p, n); in sev_dbg_crypt()
865 static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp) in sev_launch_secret() argument
867 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; in sev_launch_secret()
875 if (!sev_guest(kvm)) in sev_launch_secret()
881 pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1); in sev_launch_secret()
927 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error); in sev_launch_secret()
941 sev_unpin_memory(kvm, pages, n); in sev_launch_secret()
945 int svm_mem_enc_op(struct kvm *kvm, void __user *argp) in svm_mem_enc_op() argument
959 mutex_lock(&kvm->lock); in svm_mem_enc_op()
963 r = sev_guest_init(kvm, &sev_cmd); in svm_mem_enc_op()
966 r = sev_launch_start(kvm, &sev_cmd); in svm_mem_enc_op()
969 r = sev_launch_update_data(kvm, &sev_cmd); in svm_mem_enc_op()
972 r = sev_launch_measure(kvm, &sev_cmd); in svm_mem_enc_op()
975 r = sev_launch_finish(kvm, &sev_cmd); in svm_mem_enc_op()
978 r = sev_guest_status(kvm, &sev_cmd); in svm_mem_enc_op()
981 r = sev_dbg_crypt(kvm, &sev_cmd, true); in svm_mem_enc_op()
984 r = sev_dbg_crypt(kvm, &sev_cmd, false); in svm_mem_enc_op()
987 r = sev_launch_secret(kvm, &sev_cmd); in svm_mem_enc_op()
998 mutex_unlock(&kvm->lock); in svm_mem_enc_op()
1002 int svm_register_enc_region(struct kvm *kvm, in svm_register_enc_region() argument
1005 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; in svm_register_enc_region()
1009 if (!sev_guest(kvm)) in svm_register_enc_region()
1019 mutex_lock(&kvm->lock); in svm_register_enc_region()
1020 region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1); in svm_register_enc_region()
1023 mutex_unlock(&kvm->lock); in svm_register_enc_region()
1031 mutex_unlock(&kvm->lock); in svm_register_enc_region()
1049 find_enc_region(struct kvm *kvm, struct kvm_enc_region *range) in find_enc_region() argument
1051 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; in find_enc_region()
1064 static void __unregister_enc_region_locked(struct kvm *kvm, in __unregister_enc_region_locked() argument
1067 sev_unpin_memory(kvm, region->pages, region->npages); in __unregister_enc_region_locked()
1072 int svm_unregister_enc_region(struct kvm *kvm, in svm_unregister_enc_region() argument
1078 mutex_lock(&kvm->lock); in svm_unregister_enc_region()
1080 if (!sev_guest(kvm)) { in svm_unregister_enc_region()
1085 region = find_enc_region(kvm, range); in svm_unregister_enc_region()
1098 __unregister_enc_region_locked(kvm, region); in svm_unregister_enc_region()
1100 mutex_unlock(&kvm->lock); in svm_unregister_enc_region()
1104 mutex_unlock(&kvm->lock); in svm_unregister_enc_region()
1108 void sev_vm_destroy(struct kvm *kvm) in sev_vm_destroy() argument
1110 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; in sev_vm_destroy()
1114 if (!sev_guest(kvm)) in sev_vm_destroy()
1117 mutex_lock(&kvm->lock); in sev_vm_destroy()
1132 __unregister_enc_region_locked(kvm, in sev_vm_destroy()
1138 mutex_unlock(&kvm->lock); in sev_vm_destroy()
1140 sev_unbind_asid(kvm, sev->handle); in sev_vm_destroy()
1180 void sev_guest_memory_reclaimed(struct kvm *kvm) in sev_guest_memory_reclaimed() argument
1182 if (!sev_guest(kvm)) in sev_guest_memory_reclaimed()
1191 int asid = sev_get_asid(svm->vcpu.kvm); in pre_sev_run()
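
The references above all trace back to svm_mem_enc_op(), which takes kvm->lock and dispatches a struct kvm_sev_cmd from userspace to the matching sev_* handler (sev_guest_init, sev_launch_start, and so on). Below is a minimal userspace sketch of how such a command reaches that dispatcher via the KVM_MEMORY_ENCRYPT_OP ioctl; it assumes an SEV-capable host with /dev/sev available, and the fd setup and error handling are illustrative only, not taken from this listing.

	/*
	 * Minimal sketch: issue KVM_SEV_INIT, which svm_mem_enc_op()
	 * routes to sev_guest_init().  Error handling trimmed for brevity.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm_fd = open("/dev/kvm", O_RDWR);      /* KVM control device */
		int sev_fd = open("/dev/sev", O_RDWR);      /* SEV firmware device */
		int vm_fd  = ioctl(kvm_fd, KVM_CREATE_VM, 0);

		struct kvm_sev_cmd cmd = {
			.id     = KVM_SEV_INIT,   /* handled by sev_guest_init() */
			.sev_fd = sev_fd,
		};

		/* svm_mem_enc_op() takes kvm->lock and switches on cmd.id */
		if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd))
			fprintf(stderr, "SEV_INIT failed, fw error %u\n", cmd.error);

		return 0;
	}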