Lines matching refs:kvm (cross-reference listing, apparently from arch/s390/kvm/kvm-s390.c; each entry gives the source line number, the matching line, the enclosing function, and whether the hit is an argument or a local)

245 static int sca_switch_to_extended(struct kvm *kvm);
278 struct kvm *kvm; in kvm_clock_sync() local
283 list_for_each_entry(kvm, &vm_list, vm_list) { in kvm_clock_sync()
284 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_clock_sync()
287 kvm->arch.epoch = vcpu->arch.sie_block->epoch; in kvm_clock_sync()
288 kvm->arch.epdx = vcpu->arch.sie_block->epdx; in kvm_clock_sync()
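The kvm_clock_sync() lines above (278-288) propagate the per-vcpu SIE epoch values back into the VM-wide copy after a host clock change. On s390 the guest TOD is the host TOD plus this epoch, evaluated in wraparound 64-bit arithmetic, so stepping the host clock by some delta while subtracting the same delta from the epoch leaves the guest-visible TOD unchanged. A minimal userspace sketch of that invariant (not kernel code; names are illustrative):

```c
#include <assert.h>
#include <stdint.h>

static uint64_t guest_tod(uint64_t host_tod, uint64_t epoch)
{
	return host_tod + epoch;	/* mod 2^64, like the SIE epoch field */
}

int main(void)
{
	uint64_t host  = 0x123456789abcdef0ULL;
	uint64_t epoch = 0xfedcba9876543210ULL;
	uint64_t before = guest_tod(host, epoch);
	uint64_t delta = 0x1000;	/* host clock stepped forward */

	host += delta;
	epoch -= delta;			/* what the clock-sync path preserves */
	assert(guest_tod(host, epoch) == before);
	return 0;
}
```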
515 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) in kvm_vm_ioctl_check_extension() argument
553 if (hpage && !kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_check_extension()
592 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) in kvm_arch_sync_dirty_log() argument
597 struct gmap *gmap = kvm->arch.gmap; in kvm_arch_sync_dirty_log()
613 mark_page_dirty(kvm, cur_gfn + i); in kvm_arch_sync_dirty_log()
628 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, in kvm_vm_ioctl_get_dirty_log() argument
636 if (kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_get_dirty_log()
639 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
645 r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot); in kvm_vm_ioctl_get_dirty_log()
656 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
660 static void icpt_operexc_on_all_vcpus(struct kvm *kvm) in icpt_operexc_on_all_vcpus() argument
665 kvm_for_each_vcpu(i, vcpu, kvm) { in icpt_operexc_on_all_vcpus()
670 int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) in kvm_vm_ioctl_enable_cap() argument
679 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP"); in kvm_vm_ioctl_enable_cap()
680 kvm->arch.use_irqchip = 1; in kvm_vm_ioctl_enable_cap()
684 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP"); in kvm_vm_ioctl_enable_cap()
685 kvm->arch.user_sigp = 1; in kvm_vm_ioctl_enable_cap()
689 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
690 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
693 set_kvm_facility(kvm->arch.model.fac_mask, 129); in kvm_vm_ioctl_enable_cap()
694 set_kvm_facility(kvm->arch.model.fac_list, 129); in kvm_vm_ioctl_enable_cap()
696 set_kvm_facility(kvm->arch.model.fac_mask, 134); in kvm_vm_ioctl_enable_cap()
697 set_kvm_facility(kvm->arch.model.fac_list, 134); in kvm_vm_ioctl_enable_cap()
700 set_kvm_facility(kvm->arch.model.fac_mask, 135); in kvm_vm_ioctl_enable_cap()
701 set_kvm_facility(kvm->arch.model.fac_list, 135); in kvm_vm_ioctl_enable_cap()
704 set_kvm_facility(kvm->arch.model.fac_mask, 148); in kvm_vm_ioctl_enable_cap()
705 set_kvm_facility(kvm->arch.model.fac_list, 148); in kvm_vm_ioctl_enable_cap()
708 set_kvm_facility(kvm->arch.model.fac_mask, 152); in kvm_vm_ioctl_enable_cap()
709 set_kvm_facility(kvm->arch.model.fac_list, 152); in kvm_vm_ioctl_enable_cap()
714 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
715 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s", in kvm_vm_ioctl_enable_cap()
720 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
721 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
724 set_kvm_facility(kvm->arch.model.fac_mask, 64); in kvm_vm_ioctl_enable_cap()
725 set_kvm_facility(kvm->arch.model.fac_list, 64); in kvm_vm_ioctl_enable_cap()
728 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
729 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s", in kvm_vm_ioctl_enable_cap()
733 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
734 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
737 set_kvm_facility(kvm->arch.model.fac_mask, 72); in kvm_vm_ioctl_enable_cap()
738 set_kvm_facility(kvm->arch.model.fac_list, 72); in kvm_vm_ioctl_enable_cap()
741 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
742 VM_EVENT(kvm, 3, "ENABLE: AIS %s", in kvm_vm_ioctl_enable_cap()
747 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
748 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
751 set_kvm_facility(kvm->arch.model.fac_mask, 133); in kvm_vm_ioctl_enable_cap()
752 set_kvm_facility(kvm->arch.model.fac_list, 133); in kvm_vm_ioctl_enable_cap()
755 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
756 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s", in kvm_vm_ioctl_enable_cap()
760 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
761 if (kvm->created_vcpus) in kvm_vm_ioctl_enable_cap()
763 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_enable_cap()
767 mmap_write_lock(kvm->mm); in kvm_vm_ioctl_enable_cap()
768 kvm->mm->context.allow_gmap_hpage_1m = 1; in kvm_vm_ioctl_enable_cap()
769 mmap_write_unlock(kvm->mm); in kvm_vm_ioctl_enable_cap()
775 kvm->arch.use_skf = 0; in kvm_vm_ioctl_enable_cap()
776 kvm->arch.use_pfmfi = 0; in kvm_vm_ioctl_enable_cap()
778 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
779 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s", in kvm_vm_ioctl_enable_cap()
783 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI"); in kvm_vm_ioctl_enable_cap()
784 kvm->arch.user_stsi = 1; in kvm_vm_ioctl_enable_cap()
788 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0"); in kvm_vm_ioctl_enable_cap()
789 kvm->arch.user_instr0 = 1; in kvm_vm_ioctl_enable_cap()
790 icpt_operexc_on_all_vcpus(kvm); in kvm_vm_ioctl_enable_cap()
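Most arms of kvm_vm_ioctl_enable_cap() above follow one pattern: take kvm->lock, refuse if vcpus were already created, then set the facility number in both fac_mask and fac_list. s390 facility bits are numbered from the most significant bit of byte 0, so a stand-alone sketch of a set_kvm_facility-style helper would look like this (hypothetical helper, buffer size illustrative):

```c
#include <stdint.h>
#include <stdio.h>

/* Facility bit nr counts from the MSB of byte 0 (assumed s390 convention). */
static void set_facility(uint8_t *fac, unsigned int nr)
{
	fac[nr >> 3] |= 0x80 >> (nr & 7);
}

int main(void)
{
	uint8_t fac_list[32] = { 0 };

	set_facility(fac_list, 129);	/* e.g. vector registers */
	set_facility(fac_list, 134);
	printf("byte 16 = 0x%02x\n", fac_list[16]);	/* 129 and 134 -> 0x42 */
	return 0;
}
```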
800 static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_mem_control() argument
807 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes", in kvm_s390_get_mem_control()
808 kvm->arch.mem_limit); in kvm_s390_get_mem_control()
809 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr)) in kvm_s390_get_mem_control()
819 static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_mem_control() argument
829 VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support"); in kvm_s390_set_mem_control()
830 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
831 if (kvm->created_vcpus) in kvm_s390_set_mem_control()
833 else if (kvm->mm->context.allow_gmap_hpage_1m) in kvm_s390_set_mem_control()
836 kvm->arch.use_cmma = 1; in kvm_s390_set_mem_control()
838 kvm->arch.use_pfmfi = 0; in kvm_s390_set_mem_control()
841 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
848 if (!kvm->arch.use_cmma) in kvm_s390_set_mem_control()
851 VM_EVENT(kvm, 3, "%s", "RESET: CMMA states"); in kvm_s390_set_mem_control()
852 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
853 idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_mem_control()
854 s390_reset_cmma(kvm->arch.gmap->mm); in kvm_s390_set_mem_control()
855 srcu_read_unlock(&kvm->srcu, idx); in kvm_s390_set_mem_control()
856 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
862 if (kvm_is_ucontrol(kvm)) in kvm_s390_set_mem_control()
868 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT && in kvm_s390_set_mem_control()
869 new_limit > kvm->arch.mem_limit) in kvm_s390_set_mem_control()
880 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
881 if (!kvm->created_vcpus) { in kvm_s390_set_mem_control()
888 gmap_remove(kvm->arch.gmap); in kvm_s390_set_mem_control()
889 new->private = kvm; in kvm_s390_set_mem_control()
890 kvm->arch.gmap = new; in kvm_s390_set_mem_control()
894 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
895 VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit); in kvm_s390_set_mem_control()
896 VM_EVENT(kvm, 3, "New guest asce: 0x%pK", in kvm_s390_set_mem_control()
897 (void *) kvm->arch.gmap->asce); in kvm_s390_set_mem_control()
909 void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm) in kvm_s390_vcpu_crypto_reset_all() argument
914 kvm_s390_vcpu_block_all(kvm); in kvm_s390_vcpu_crypto_reset_all()
916 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_vcpu_crypto_reset_all()
922 kvm_s390_vcpu_unblock_all(kvm); in kvm_s390_vcpu_crypto_reset_all()
925 static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_set_crypto() argument
927 mutex_lock(&kvm->lock); in kvm_s390_vm_set_crypto()
930 if (!test_kvm_facility(kvm, 76)) { in kvm_s390_vm_set_crypto()
931 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
935 kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_vm_set_crypto()
936 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
937 kvm->arch.crypto.aes_kw = 1; in kvm_s390_vm_set_crypto()
938 VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support"); in kvm_s390_vm_set_crypto()
941 if (!test_kvm_facility(kvm, 76)) { in kvm_s390_vm_set_crypto()
942 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
946 kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_vm_set_crypto()
947 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
948 kvm->arch.crypto.dea_kw = 1; in kvm_s390_vm_set_crypto()
949 VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support"); in kvm_s390_vm_set_crypto()
952 if (!test_kvm_facility(kvm, 76)) { in kvm_s390_vm_set_crypto()
953 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
956 kvm->arch.crypto.aes_kw = 0; in kvm_s390_vm_set_crypto()
957 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
958 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
959 VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support"); in kvm_s390_vm_set_crypto()
962 if (!test_kvm_facility(kvm, 76)) { in kvm_s390_vm_set_crypto()
963 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
966 kvm->arch.crypto.dea_kw = 0; in kvm_s390_vm_set_crypto()
967 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
968 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
969 VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support"); in kvm_s390_vm_set_crypto()
973 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
976 kvm->arch.crypto.apie = 1; in kvm_s390_vm_set_crypto()
980 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
983 kvm->arch.crypto.apie = 0; in kvm_s390_vm_set_crypto()
986 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
990 kvm_s390_vcpu_crypto_reset_all(kvm); in kvm_s390_vm_set_crypto()
991 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
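The keywrapping arms of kvm_s390_vm_set_crypto() mirror each other: enabling installs a fresh random wrapping-key mask and sets the aes_kw/dea_kw flag, disabling clears both (lines 935-969), and kvm_s390_vcpu_crypto_reset_all() then pushes the change to every vcpu. A hedged userspace sketch of the enable/disable pair (struct layout and mask size are assumptions):

```c
#include <stdio.h>
#include <string.h>
#include <sys/random.h>

struct crycb { unsigned char aes_wrapping_key_mask[32]; };	/* size assumed */

static void aes_kw_enable(struct crycb *c, int *aes_kw)
{
	/* fresh random mask, as the enable path does with get_random_bytes() */
	if (getrandom(c->aes_wrapping_key_mask,
		      sizeof(c->aes_wrapping_key_mask), 0) < 0)
		perror("getrandom");
	*aes_kw = 1;
}

static void aes_kw_disable(struct crycb *c, int *aes_kw)
{
	*aes_kw = 0;
	memset(c->aes_wrapping_key_mask, 0, sizeof(c->aes_wrapping_key_mask));
}

int main(void)
{
	struct crycb c;
	int aes_kw = 0;

	aes_kw_enable(&c, &aes_kw);
	aes_kw_disable(&c, &aes_kw);
	return aes_kw;
}
```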
995 static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req) in kvm_s390_sync_request_broadcast() argument
1000 kvm_for_each_vcpu(cx, vcpu, kvm) in kvm_s390_sync_request_broadcast()
1008 static int kvm_s390_vm_start_migration(struct kvm *kvm) in kvm_s390_vm_start_migration() argument
1016 if (kvm->arch.migration_mode) in kvm_s390_vm_start_migration()
1018 slots = kvm_memslots(kvm); in kvm_s390_vm_start_migration()
1022 if (!kvm->arch.use_cmma) { in kvm_s390_vm_start_migration()
1023 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1040 atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages); in kvm_s390_vm_start_migration()
1041 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1042 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION); in kvm_s390_vm_start_migration()
1050 static int kvm_s390_vm_stop_migration(struct kvm *kvm) in kvm_s390_vm_stop_migration() argument
1053 if (!kvm->arch.migration_mode) in kvm_s390_vm_stop_migration()
1055 kvm->arch.migration_mode = 0; in kvm_s390_vm_stop_migration()
1056 if (kvm->arch.use_cmma) in kvm_s390_vm_stop_migration()
1057 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION); in kvm_s390_vm_stop_migration()
1061 static int kvm_s390_vm_set_migration(struct kvm *kvm, in kvm_s390_vm_set_migration() argument
1066 mutex_lock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
1069 res = kvm_s390_vm_start_migration(kvm); in kvm_s390_vm_set_migration()
1072 res = kvm_s390_vm_stop_migration(kvm); in kvm_s390_vm_set_migration()
1077 mutex_unlock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
1082 static int kvm_s390_vm_get_migration(struct kvm *kvm, in kvm_s390_vm_get_migration() argument
1085 u64 mig = kvm->arch.migration_mode; in kvm_s390_vm_get_migration()
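Starting and stopping migration mode (lines 1008-1077) is serialized on kvm->slots_lock; starting with CMMA in use seeds an atomic count of dirty pages that the CMMA walk later decrements (line 2042), so userspace can watch "remaining" fall toward zero. A compact sketch of that bookkeeping with C11 atomics (names illustrative):

```c
#include <inttypes.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint_fast64_t cmma_dirty_pages;
static int migration_mode;

static void start_migration(uint_fast64_t ram_pages)
{
	/* every backed page starts out "dirty" for the first pass */
	atomic_store(&cmma_dirty_pages, ram_pages);
	migration_mode = 1;
}

static uint_fast64_t report_one_page(void)
{
	/* mirrors the atomic64_dec() in the CMMA walk */
	return atomic_fetch_sub(&cmma_dirty_pages, 1) - 1;
}

int main(void)
{
	start_migration(3);
	while (atomic_load(&cmma_dirty_pages))
		printf("remaining: %" PRIuFAST64 "\n", report_one_page());
	return !migration_mode;
}
```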
1095 static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
1097 static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod_ext() argument
1104 if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx) in kvm_s390_set_tod_ext()
1106 __kvm_s390_set_tod_clock(kvm, &gtod); in kvm_s390_set_tod_ext()
1108 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx", in kvm_s390_set_tod_ext()
1114 static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod_high() argument
1124 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high); in kvm_s390_set_tod_high()
1129 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod_low() argument
1137 __kvm_s390_set_tod_clock(kvm, &gtod); in kvm_s390_set_tod_low()
1138 VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod); in kvm_s390_set_tod_low()
1142 static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod() argument
1149 mutex_lock(&kvm->lock); in kvm_s390_set_tod()
1154 if (kvm_s390_pv_is_protected(kvm)) { in kvm_s390_set_tod()
1161 ret = kvm_s390_set_tod_ext(kvm, attr); in kvm_s390_set_tod()
1164 ret = kvm_s390_set_tod_high(kvm, attr); in kvm_s390_set_tod()
1167 ret = kvm_s390_set_tod_low(kvm, attr); in kvm_s390_set_tod()
1175 mutex_unlock(&kvm->lock); in kvm_s390_set_tod()
1179 static void kvm_s390_get_tod_clock(struct kvm *kvm, in kvm_s390_get_tod_clock() argument
1188 gtod->tod = htod.tod + kvm->arch.epoch; in kvm_s390_get_tod_clock()
1190 if (test_kvm_facility(kvm, 139)) { in kvm_s390_get_tod_clock()
1191 gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx; in kvm_s390_get_tod_clock()
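kvm_s390_get_tod_clock() forms the guest clock as htod.tod + epoch and, with facility 139, carries into the epoch index. With unsigned 64-bit addition, a carry happened exactly when the sum is smaller than one of the addends, which lets the multi-word add work without a wider type. A small demo of that carry test (not kernel code):

```c
#include <assert.h>
#include <stdint.h>

/* (epoch_idx:tod) += (epdx:epoch) as a 2x64-bit add with manual carry */
static void tod_add(uint8_t *epoch_idx, uint64_t *tod,
		    uint8_t epdx, uint64_t epoch)
{
	uint64_t sum = *tod + epoch;

	*epoch_idx += epdx;
	if (sum < *tod)		/* unsigned overflow <=> carry out */
		*epoch_idx += 1;
	*tod = sum;
}

int main(void)
{
	uint8_t idx = 0;
	uint64_t tod = UINT64_MAX;

	tod_add(&idx, &tod, 0, 2);	/* wraps: tod -> 1, carry into idx */
	assert(tod == 1 && idx == 1);
	return 0;
}
```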
1199 static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod_ext() argument
1204 kvm_s390_get_tod_clock(kvm, &gtod); in kvm_s390_get_tod_ext()
1208 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx", in kvm_s390_get_tod_ext()
1213 static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod_high() argument
1220 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high); in kvm_s390_get_tod_high()
1225 static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod_low() argument
1229 gtod = kvm_s390_get_tod_clock_fast(kvm); in kvm_s390_get_tod_low()
1232 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod); in kvm_s390_get_tod_low()
1237 static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod() argument
1246 ret = kvm_s390_get_tod_ext(kvm, attr); in kvm_s390_get_tod()
1249 ret = kvm_s390_get_tod_high(kvm, attr); in kvm_s390_get_tod()
1252 ret = kvm_s390_get_tod_low(kvm, attr); in kvm_s390_get_tod()
1261 static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_processor() argument
1267 mutex_lock(&kvm->lock); in kvm_s390_set_processor()
1268 if (kvm->created_vcpus) { in kvm_s390_set_processor()
1279 kvm->arch.model.cpuid = proc->cpuid; in kvm_s390_set_processor()
1284 kvm->arch.model.ibc = unblocked_ibc; in kvm_s390_set_processor()
1286 kvm->arch.model.ibc = lowest_ibc; in kvm_s390_set_processor()
1288 kvm->arch.model.ibc = proc->ibc; in kvm_s390_set_processor()
1290 memcpy(kvm->arch.model.fac_list, proc->fac_list, in kvm_s390_set_processor()
1292 VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx", in kvm_s390_set_processor()
1293 kvm->arch.model.ibc, in kvm_s390_set_processor()
1294 kvm->arch.model.cpuid); in kvm_s390_set_processor()
1295 VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx", in kvm_s390_set_processor()
1296 kvm->arch.model.fac_list[0], in kvm_s390_set_processor()
1297 kvm->arch.model.fac_list[1], in kvm_s390_set_processor()
1298 kvm->arch.model.fac_list[2]); in kvm_s390_set_processor()
1303 mutex_unlock(&kvm->lock); in kvm_s390_set_processor()
1307 static int kvm_s390_set_processor_feat(struct kvm *kvm, in kvm_s390_set_processor_feat() argument
1319 mutex_lock(&kvm->lock); in kvm_s390_set_processor_feat()
1320 if (kvm->created_vcpus) { in kvm_s390_set_processor_feat()
1321 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1324 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat, in kvm_s390_set_processor_feat()
1326 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1327 VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx", in kvm_s390_set_processor_feat()
1334 static int kvm_s390_set_processor_subfunc(struct kvm *kvm, in kvm_s390_set_processor_subfunc() argument
1337 mutex_lock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1338 if (kvm->created_vcpus) { in kvm_s390_set_processor_subfunc()
1339 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1343 if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr, in kvm_s390_set_processor_subfunc()
1345 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1348 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1350 VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1351 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_set_processor_subfunc()
1352 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_set_processor_subfunc()
1353 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_set_processor_subfunc()
1354 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_set_processor_subfunc()
1355 VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1356 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_set_processor_subfunc()
1357 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_set_processor_subfunc()
1358 VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1359 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_set_processor_subfunc()
1360 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_set_processor_subfunc()
1361 VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1362 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_set_processor_subfunc()
1363 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_set_processor_subfunc()
1364 VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1365 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_set_processor_subfunc()
1366 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_set_processor_subfunc()
1367 VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1368 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_set_processor_subfunc()
1369 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_set_processor_subfunc()
1370 VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1371 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_set_processor_subfunc()
1372 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_set_processor_subfunc()
1373 VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1374 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_set_processor_subfunc()
1375 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_set_processor_subfunc()
1376 VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1377 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_set_processor_subfunc()
1378 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_set_processor_subfunc()
1379 VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1380 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_set_processor_subfunc()
1381 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_set_processor_subfunc()
1382 VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1383 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_set_processor_subfunc()
1384 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_set_processor_subfunc()
1385 VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1386 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_set_processor_subfunc()
1387 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_set_processor_subfunc()
1388 VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1389 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_set_processor_subfunc()
1390 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_set_processor_subfunc()
1391 VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1392 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_set_processor_subfunc()
1393 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_set_processor_subfunc()
1394 VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1395 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_set_processor_subfunc()
1396 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_set_processor_subfunc()
1397 VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1398 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_set_processor_subfunc()
1399 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_set_processor_subfunc()
1400 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_set_processor_subfunc()
1401 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_set_processor_subfunc()
1402 VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1403 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_set_processor_subfunc()
1404 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_set_processor_subfunc()
1405 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_set_processor_subfunc()
1406 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_set_processor_subfunc()
1411 static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_cpu_model() argument
1417 ret = kvm_s390_set_processor(kvm, attr); in kvm_s390_set_cpu_model()
1420 ret = kvm_s390_set_processor_feat(kvm, attr); in kvm_s390_set_cpu_model()
1423 ret = kvm_s390_set_processor_subfunc(kvm, attr); in kvm_s390_set_cpu_model()
1429 static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_processor() argument
1439 proc->cpuid = kvm->arch.model.cpuid; in kvm_s390_get_processor()
1440 proc->ibc = kvm->arch.model.ibc; in kvm_s390_get_processor()
1441 memcpy(&proc->fac_list, kvm->arch.model.fac_list, in kvm_s390_get_processor()
1443 VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx", in kvm_s390_get_processor()
1444 kvm->arch.model.ibc, in kvm_s390_get_processor()
1445 kvm->arch.model.cpuid); in kvm_s390_get_processor()
1446 VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx", in kvm_s390_get_processor()
1447 kvm->arch.model.fac_list[0], in kvm_s390_get_processor()
1448 kvm->arch.model.fac_list[1], in kvm_s390_get_processor()
1449 kvm->arch.model.fac_list[2]); in kvm_s390_get_processor()
1457 static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_machine() argument
1469 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask, in kvm_s390_get_machine()
1473 VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx", in kvm_s390_get_machine()
1474 kvm->arch.model.ibc, in kvm_s390_get_machine()
1475 kvm->arch.model.cpuid); in kvm_s390_get_machine()
1476 VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx", in kvm_s390_get_machine()
1480 VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx", in kvm_s390_get_machine()
1491 static int kvm_s390_get_processor_feat(struct kvm *kvm, in kvm_s390_get_processor_feat() argument
1496 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat, in kvm_s390_get_processor_feat()
1500 VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx", in kvm_s390_get_processor_feat()
1507 static int kvm_s390_get_machine_feat(struct kvm *kvm, in kvm_s390_get_machine_feat() argument
1517 VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx", in kvm_s390_get_machine_feat()
1524 static int kvm_s390_get_processor_subfunc(struct kvm *kvm, in kvm_s390_get_processor_subfunc() argument
1527 if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs, in kvm_s390_get_processor_subfunc()
1531 VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1532 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_get_processor_subfunc()
1533 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_get_processor_subfunc()
1534 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_get_processor_subfunc()
1535 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_get_processor_subfunc()
1536 VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1537 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_get_processor_subfunc()
1538 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_get_processor_subfunc()
1539 VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1540 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_get_processor_subfunc()
1541 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_get_processor_subfunc()
1542 VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1543 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_get_processor_subfunc()
1544 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_get_processor_subfunc()
1545 VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1546 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_get_processor_subfunc()
1547 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_get_processor_subfunc()
1548 VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1549 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_get_processor_subfunc()
1550 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_get_processor_subfunc()
1551 VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1552 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_get_processor_subfunc()
1553 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_get_processor_subfunc()
1554 VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1555 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_get_processor_subfunc()
1556 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_get_processor_subfunc()
1557 VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1558 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_get_processor_subfunc()
1559 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_get_processor_subfunc()
1560 VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1561 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_get_processor_subfunc()
1562 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_get_processor_subfunc()
1563 VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1564 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_get_processor_subfunc()
1565 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_get_processor_subfunc()
1566 VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1567 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_get_processor_subfunc()
1568 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_get_processor_subfunc()
1569 VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1570 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_get_processor_subfunc()
1571 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_get_processor_subfunc()
1572 VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1573 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_get_processor_subfunc()
1574 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_get_processor_subfunc()
1575 VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1576 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_get_processor_subfunc()
1577 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_get_processor_subfunc()
1578 VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1579 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_get_processor_subfunc()
1580 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_get_processor_subfunc()
1581 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_get_processor_subfunc()
1582 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_get_processor_subfunc()
1583 VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1584 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_get_processor_subfunc()
1585 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_get_processor_subfunc()
1586 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_get_processor_subfunc()
1587 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_get_processor_subfunc()
1592 static int kvm_s390_get_machine_subfunc(struct kvm *kvm, in kvm_s390_get_machine_subfunc() argument
1599 VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1604 VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1607 VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1610 VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1613 VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1616 VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1619 VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1622 VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1625 VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1628 VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1631 VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1634 VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1637 VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1640 VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1643 VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1646 VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1651 VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1660 static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_cpu_model() argument
1666 ret = kvm_s390_get_processor(kvm, attr); in kvm_s390_get_cpu_model()
1669 ret = kvm_s390_get_machine(kvm, attr); in kvm_s390_get_cpu_model()
1672 ret = kvm_s390_get_processor_feat(kvm, attr); in kvm_s390_get_cpu_model()
1675 ret = kvm_s390_get_machine_feat(kvm, attr); in kvm_s390_get_cpu_model()
1678 ret = kvm_s390_get_processor_subfunc(kvm, attr); in kvm_s390_get_cpu_model()
1681 ret = kvm_s390_get_machine_subfunc(kvm, attr); in kvm_s390_get_cpu_model()
1687 static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_set_attr() argument
1693 ret = kvm_s390_set_mem_control(kvm, attr); in kvm_s390_vm_set_attr()
1696 ret = kvm_s390_set_tod(kvm, attr); in kvm_s390_vm_set_attr()
1699 ret = kvm_s390_set_cpu_model(kvm, attr); in kvm_s390_vm_set_attr()
1702 ret = kvm_s390_vm_set_crypto(kvm, attr); in kvm_s390_vm_set_attr()
1705 ret = kvm_s390_vm_set_migration(kvm, attr); in kvm_s390_vm_set_attr()
1715 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_get_attr() argument
1721 ret = kvm_s390_get_mem_control(kvm, attr); in kvm_s390_vm_get_attr()
1724 ret = kvm_s390_get_tod(kvm, attr); in kvm_s390_vm_get_attr()
1727 ret = kvm_s390_get_cpu_model(kvm, attr); in kvm_s390_vm_get_attr()
1730 ret = kvm_s390_vm_get_migration(kvm, attr); in kvm_s390_vm_get_attr()
1740 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_has_attr() argument
1813 static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) in kvm_s390_get_skeys() argument
1835 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_skeys()
1837 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_get_skeys()
1847 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_skeys()
1861 static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) in kvm_s390_set_skeys() argument
1893 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_skeys()
1896 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_skeys()
1918 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_skeys()
1969 static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args, in kvm_s390_peek_cmma() argument
1976 hva = gfn_to_hva(kvm, cur_gfn); in kvm_s390_peek_cmma()
1983 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_peek_cmma()
2017 static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args, in kvm_s390_get_cmma() argument
2021 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_s390_get_cmma()
2028 ms = gfn_to_memslot(kvm, cur_gfn); in kvm_s390_get_cmma()
2037 hva = gfn_to_hva(kvm, cur_gfn); in kvm_s390_get_cmma()
2042 atomic64_dec(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma()
2043 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_get_cmma()
2060 ms = gfn_to_memslot(kvm, cur_gfn); in kvm_s390_get_cmma()
2076 static int kvm_s390_get_cmma_bits(struct kvm *kvm, in kvm_s390_get_cmma_bits() argument
2083 if (!kvm->arch.use_cmma) in kvm_s390_get_cmma_bits()
2090 if (!peek && !kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2094 if (!bufsize || !kvm->mm->context.uses_cmm) { in kvm_s390_get_cmma_bits()
2099 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) { in kvm_s390_get_cmma_bits()
2108 mmap_read_lock(kvm->mm); in kvm_s390_get_cmma_bits()
2109 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_cmma_bits()
2111 ret = kvm_s390_peek_cmma(kvm, args, values, bufsize); in kvm_s390_get_cmma_bits()
2113 ret = kvm_s390_get_cmma(kvm, args, values, bufsize); in kvm_s390_get_cmma_bits()
2114 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_cmma_bits()
2115 mmap_read_unlock(kvm->mm); in kvm_s390_get_cmma_bits()
2117 if (kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2118 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma_bits()
2134 static int kvm_s390_set_cmma_bits(struct kvm *kvm, in kvm_s390_set_cmma_bits() argument
2143 if (!kvm->arch.use_cmma) in kvm_s390_set_cmma_bits()
2165 mmap_read_lock(kvm->mm); in kvm_s390_set_cmma_bits()
2166 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_cmma_bits()
2168 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_cmma_bits()
2177 set_pgste_bits(kvm->mm, hva, mask, pgstev); in kvm_s390_set_cmma_bits()
2179 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_cmma_bits()
2180 mmap_read_unlock(kvm->mm); in kvm_s390_set_cmma_bits()
2182 if (!kvm->mm->context.uses_cmm) { in kvm_s390_set_cmma_bits()
2183 mmap_write_lock(kvm->mm); in kvm_s390_set_cmma_bits()
2184 kvm->mm->context.uses_cmm = 1; in kvm_s390_set_cmma_bits()
2185 mmap_write_unlock(kvm->mm); in kvm_s390_set_cmma_bits()
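kvm_s390_set_cmma_bits() updates PGSTEs under the mmap read lock, then flips mm->context.uses_cmm lazily: an unlocked check followed by a short write-locked store (lines 2182-2185). The race is benign, since two concurrent callers at worst both store 1. A sketch of that idiom with a pthread rwlock standing in for mmap_lock (illustrative only):

```c
#include <pthread.h>

static pthread_rwlock_t mm_lock = PTHREAD_RWLOCK_INITIALIZER;
static int uses_cmm;

static void note_cmm_user(void)
{
	if (!uses_cmm) {	/* unlocked peek; a double store is harmless */
		pthread_rwlock_wrlock(&mm_lock);
		uses_cmm = 1;
		pthread_rwlock_unlock(&mm_lock);
	}
}

int main(void)
{
	note_cmm_user();
	note_cmm_user();	/* second call skips the write lock */
	return !uses_cmm;
}
```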
2192 static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp) in kvm_s390_cpus_from_pv() argument
2207 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_cpus_from_pv()
2219 static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc) in kvm_s390_cpus_to_pv() argument
2226 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_cpus_to_pv()
2234 kvm_s390_cpus_from_pv(kvm, &dummy, &dummy); in kvm_s390_cpus_to_pv()
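kvm_s390_cpus_to_pv() converts vcpus one by one and, on the first failure, calls kvm_s390_cpus_from_pv() to undo the ones already converted (line 2234). The shape of that unwind, reduced to a runnable toy (failure point and names invented for the demo):

```c
#include <stdio.h>

static int to_pv(int i)    { return i == 2 ? -1 : 0; }	/* pretend vcpu 2 fails */
static void from_pv(int i) { printf("undo vcpu %d\n", i); }

static int cpus_to_pv(int n)
{
	int i, r = 0;

	for (i = 0; i < n; i++) {
		r = to_pv(i);
		if (r)
			break;
	}
	if (r)
		while (i--)
			from_pv(i);	/* best-effort rollback */
	return r;
}

int main(void)
{
	return cpus_to_pv(4) == -1 ? 0 : 1;
}
```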
2238 static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd) in kvm_s390_handle_pv() argument
2247 if (kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2254 r = sca_switch_to_extended(kvm); in kvm_s390_handle_pv()
2264 r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2268 r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2270 kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy); in kvm_s390_handle_pv()
2273 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2278 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2281 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2289 r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2292 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2300 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2320 r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length, in kvm_s390_handle_pv()
2330 if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm)) in kvm_s390_handle_pv()
2337 r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak, in kvm_s390_handle_pv()
2343 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2346 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), in kvm_s390_handle_pv()
2348 KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc, in kvm_s390_handle_pv()
2354 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2357 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), in kvm_s390_handle_pv()
2359 KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x", in kvm_s390_handle_pv()
2365 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2368 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), in kvm_s390_handle_pv()
2370 KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x", in kvm_s390_handle_pv()
2383 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl() local
2395 r = kvm_s390_inject_vm(kvm, &s390int); in kvm_arch_vm_ioctl()
2402 if (kvm->arch.use_irqchip) { in kvm_arch_vm_ioctl()
2405 r = kvm_set_irq_routing(kvm, &routing, 0, 0); in kvm_arch_vm_ioctl()
2413 r = kvm_s390_vm_set_attr(kvm, &attr); in kvm_arch_vm_ioctl()
2420 r = kvm_s390_vm_get_attr(kvm, &attr); in kvm_arch_vm_ioctl()
2427 r = kvm_s390_vm_has_attr(kvm, &attr); in kvm_arch_vm_ioctl()
2437 r = kvm_s390_get_skeys(kvm, &args); in kvm_arch_vm_ioctl()
2447 r = kvm_s390_set_skeys(kvm, &args); in kvm_arch_vm_ioctl()
2456 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
2457 r = kvm_s390_get_cmma_bits(kvm, &args); in kvm_arch_vm_ioctl()
2458 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
2472 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
2473 r = kvm_s390_set_cmma_bits(kvm, &args); in kvm_arch_vm_ioctl()
2474 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
2481 kvm->arch.user_cpu_state_ctrl = 1; in kvm_arch_vm_ioctl()
2495 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
2496 r = kvm_s390_handle_pv(kvm, &args); in kvm_arch_vm_ioctl()
2497 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
2531 static void kvm_s390_set_crycb_format(struct kvm *kvm) in kvm_s390_set_crycb_format() argument
2533 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb; in kvm_s390_set_crycb_format()
2536 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK); in kvm_s390_set_crycb_format()
2539 if (!test_kvm_facility(kvm, 76)) in kvm_s390_set_crycb_format()
2543 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; in kvm_s390_set_crycb_format()
2545 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; in kvm_s390_set_crycb_format()
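kvm_s390_set_crycb_format() packs the CRYCB origin and its format into one 32-bit designation: the block is aligned, so the low bits of its address are free to carry the format field (lines 2533-2545). A userspace demo of the same masking, with the constant values assumed from the kernel headers (FORMAT_MASK 0x3, FORMAT1 0x1, FORMAT2 0x2):

```c
#include <assert.h>
#include <stdalign.h>
#include <stdint.h>

#define CRYCB_FORMAT_MASK 0x00000003u	/* assumed values */
#define CRYCB_FORMAT1     0x00000001u
#define CRYCB_FORMAT2     0x00000002u

static alignas(256) unsigned char crycb[256];	/* alignment keeps low bits free */

int main(void)
{
	uint32_t crycbd = (uint32_t)(uintptr_t)crycb;

	assert((crycbd & CRYCB_FORMAT_MASK) == 0);	/* no clash with address bits */
	crycbd &= ~CRYCB_FORMAT_MASK;
	crycbd |= CRYCB_FORMAT2;
	return (crycbd & CRYCB_FORMAT_MASK) != CRYCB_FORMAT2;
}
```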
2548 void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm, in kvm_arch_crypto_set_masks() argument
2551 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb; in kvm_arch_crypto_set_masks()
2553 mutex_lock(&kvm->lock); in kvm_arch_crypto_set_masks()
2554 kvm_s390_vcpu_block_all(kvm); in kvm_arch_crypto_set_masks()
2556 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) { in kvm_arch_crypto_set_masks()
2559 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx", in kvm_arch_crypto_set_masks()
2562 VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx", in kvm_arch_crypto_set_masks()
2565 VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx", in kvm_arch_crypto_set_masks()
2573 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x", in kvm_arch_crypto_set_masks()
2582 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART); in kvm_arch_crypto_set_masks()
2583 kvm_s390_vcpu_unblock_all(kvm); in kvm_arch_crypto_set_masks()
2584 mutex_unlock(&kvm->lock); in kvm_arch_crypto_set_masks()
2588 void kvm_arch_crypto_clear_masks(struct kvm *kvm) in kvm_arch_crypto_clear_masks() argument
2590 mutex_lock(&kvm->lock); in kvm_arch_crypto_clear_masks()
2591 kvm_s390_vcpu_block_all(kvm); in kvm_arch_crypto_clear_masks()
2593 memset(&kvm->arch.crypto.crycb->apcb0, 0, in kvm_arch_crypto_clear_masks()
2594 sizeof(kvm->arch.crypto.crycb->apcb0)); in kvm_arch_crypto_clear_masks()
2595 memset(&kvm->arch.crypto.crycb->apcb1, 0, in kvm_arch_crypto_clear_masks()
2596 sizeof(kvm->arch.crypto.crycb->apcb1)); in kvm_arch_crypto_clear_masks()
2598 VM_EVENT(kvm, 3, "%s", "CLR CRYCB:"); in kvm_arch_crypto_clear_masks()
2600 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART); in kvm_arch_crypto_clear_masks()
2601 kvm_s390_vcpu_unblock_all(kvm); in kvm_arch_crypto_clear_masks()
2602 mutex_unlock(&kvm->lock); in kvm_arch_crypto_clear_masks()
2615 static void kvm_s390_crypto_init(struct kvm *kvm) in kvm_s390_crypto_init() argument
2617 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb; in kvm_s390_crypto_init()
2618 kvm_s390_set_crycb_format(kvm); in kvm_s390_crypto_init()
2620 if (!test_kvm_facility(kvm, 76)) in kvm_s390_crypto_init()
2624 kvm->arch.crypto.aes_kw = 1; in kvm_s390_crypto_init()
2625 kvm->arch.crypto.dea_kw = 1; in kvm_s390_crypto_init()
2626 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_crypto_init()
2627 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_crypto_init()
2628 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_crypto_init()
2629 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_crypto_init()
2632 static void sca_dispose(struct kvm *kvm) in sca_dispose() argument
2634 if (kvm->arch.use_esca) in sca_dispose()
2635 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block)); in sca_dispose()
2637 free_page((unsigned long)(kvm->arch.sca)); in sca_dispose()
2638 kvm->arch.sca = NULL; in sca_dispose()
2641 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) in kvm_arch_init_vm() argument
2667 rwlock_init(&kvm->arch.sca_lock); in kvm_arch_init_vm()
2669 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags); in kvm_arch_init_vm()
2670 if (!kvm->arch.sca) in kvm_arch_init_vm()
2676 kvm->arch.sca = (struct bsca_block *) in kvm_arch_init_vm()
2677 ((char *) kvm->arch.sca + sca_offset); in kvm_arch_init_vm()
2682 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long)); in kvm_arch_init_vm()
2683 if (!kvm->arch.dbf) in kvm_arch_init_vm()
2687 kvm->arch.sie_page2 = in kvm_arch_init_vm()
2689 if (!kvm->arch.sie_page2) in kvm_arch_init_vm()
2692 kvm->arch.sie_page2->kvm = kvm; in kvm_arch_init_vm()
2693 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list; in kvm_arch_init_vm()
2696 kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] & in kvm_arch_init_vm()
2699 kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] & in kvm_arch_init_vm()
2702 kvm->arch.model.subfuncs = kvm_s390_available_subfunc; in kvm_arch_init_vm()
2705 set_kvm_facility(kvm->arch.model.fac_mask, 138); in kvm_arch_init_vm()
2706 set_kvm_facility(kvm->arch.model.fac_list, 138); in kvm_arch_init_vm()
2708 set_kvm_facility(kvm->arch.model.fac_mask, 74); in kvm_arch_init_vm()
2709 set_kvm_facility(kvm->arch.model.fac_list, 74); in kvm_arch_init_vm()
2711 set_kvm_facility(kvm->arch.model.fac_mask, 147); in kvm_arch_init_vm()
2712 set_kvm_facility(kvm->arch.model.fac_list, 147); in kvm_arch_init_vm()
2716 set_kvm_facility(kvm->arch.model.fac_mask, 65); in kvm_arch_init_vm()
2718 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid(); in kvm_arch_init_vm()
2719 kvm->arch.model.ibc = sclp.ibc & 0x0fff; in kvm_arch_init_vm()
2721 kvm_s390_crypto_init(kvm); in kvm_arch_init_vm()
2723 mutex_init(&kvm->arch.float_int.ais_lock); in kvm_arch_init_vm()
2724 spin_lock_init(&kvm->arch.float_int.lock); in kvm_arch_init_vm()
2726 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); in kvm_arch_init_vm()
2727 init_waitqueue_head(&kvm->arch.ipte_wq); in kvm_arch_init_vm()
2728 mutex_init(&kvm->arch.ipte_mutex); in kvm_arch_init_vm()
2730 debug_register_view(kvm->arch.dbf, &debug_sprintf_view); in kvm_arch_init_vm()
2731 VM_EVENT(kvm, 3, "vm created with type %lu", type); in kvm_arch_init_vm()
2734 kvm->arch.gmap = NULL; in kvm_arch_init_vm()
2735 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT; in kvm_arch_init_vm()
2738 kvm->arch.mem_limit = TASK_SIZE_MAX; in kvm_arch_init_vm()
2740 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX, in kvm_arch_init_vm()
2742 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1); in kvm_arch_init_vm()
2743 if (!kvm->arch.gmap) in kvm_arch_init_vm()
2745 kvm->arch.gmap->private = kvm; in kvm_arch_init_vm()
2746 kvm->arch.gmap->pfault_enabled = 0; in kvm_arch_init_vm()
2749 kvm->arch.use_pfmfi = sclp.has_pfmfi; in kvm_arch_init_vm()
2750 kvm->arch.use_skf = sclp.has_skey; in kvm_arch_init_vm()
2751 spin_lock_init(&kvm->arch.start_stop_lock); in kvm_arch_init_vm()
2752 kvm_s390_vsie_init(kvm); in kvm_arch_init_vm()
2754 kvm_s390_gisa_init(kvm); in kvm_arch_init_vm()
2755 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid); in kvm_arch_init_vm()
2759 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_init_vm()
2760 debug_unregister(kvm->arch.dbf); in kvm_arch_init_vm()
2761 sca_dispose(kvm); in kvm_arch_init_vm()
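One detail from the kvm_arch_init_vm() listing above: lines 2676-2677 place each VM's basic SCA at a rotating offset inside its zeroed page. As far as I recall, the kernel staggers sca_offset by 16 bytes per VM, wrapping within the page, so the SCAs of different VMs do not all land on the same cache lines. A toy version of that rotation (sizes illustrative):

```c
#include <stdio.h>

#define PAGE_SIZE 4096
#define SCA_SIZE  64	/* stand-in for sizeof(struct bsca_block) */

static unsigned int sca_offset;

/* next staggered offset; wraps so offset + SCA_SIZE stays inside the page */
static unsigned int next_sca_offset(void)
{
	sca_offset += 16;
	if (sca_offset + SCA_SIZE > PAGE_SIZE)
		sca_offset = 0;
	return sca_offset;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("VM %d -> offset %u\n", i, next_sca_offset());
	return 0;
}
```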
2774 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
2777 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
2780 if (vcpu->kvm->arch.use_cmma) in kvm_arch_vcpu_destroy()
2788 static void kvm_free_vcpus(struct kvm *kvm) in kvm_free_vcpus() argument
2793 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_free_vcpus()
2796 mutex_lock(&kvm->lock); in kvm_free_vcpus()
2797 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) in kvm_free_vcpus()
2798 kvm->vcpus[i] = NULL; in kvm_free_vcpus()
2800 atomic_set(&kvm->online_vcpus, 0); in kvm_free_vcpus()
2801 mutex_unlock(&kvm->lock); in kvm_free_vcpus()
2804 void kvm_arch_destroy_vm(struct kvm *kvm) in kvm_arch_destroy_vm() argument
2808 kvm_free_vcpus(kvm); in kvm_arch_destroy_vm()
2809 sca_dispose(kvm); in kvm_arch_destroy_vm()
2810 kvm_s390_gisa_destroy(kvm); in kvm_arch_destroy_vm()
2817 if (kvm_s390_pv_get_handle(kvm)) in kvm_arch_destroy_vm()
2818 kvm_s390_pv_deinit_vm(kvm, &rc, &rrc); in kvm_arch_destroy_vm()
2819 debug_unregister(kvm->arch.dbf); in kvm_arch_destroy_vm()
2820 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_destroy_vm()
2821 if (!kvm_is_ucontrol(kvm)) in kvm_arch_destroy_vm()
2822 gmap_remove(kvm->arch.gmap); in kvm_arch_destroy_vm()
2823 kvm_s390_destroy_adapters(kvm); in kvm_arch_destroy_vm()
2824 kvm_s390_clear_float_irqs(kvm); in kvm_arch_destroy_vm()
2825 kvm_s390_vsie_destroy(kvm); in kvm_arch_destroy_vm()
2826 KVM_EVENT(3, "vm 0x%pK destroyed", kvm); in kvm_arch_destroy_vm()
2835 vcpu->arch.gmap->private = vcpu->kvm; in __kvm_ucontrol_vcpu_init()
2844 read_lock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
2845 if (vcpu->kvm->arch.use_esca) { in sca_del_vcpu()
2846 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
2851 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
2856 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
2862 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
2869 read_lock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
2870 if (vcpu->kvm->arch.use_esca) { in sca_add_vcpu()
2871 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
2879 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
2886 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
2907 static int sca_switch_to_extended(struct kvm *kvm) in sca_switch_to_extended() argument
2909 struct bsca_block *old_sca = kvm->arch.sca; in sca_switch_to_extended()
2915 if (kvm->arch.use_esca) in sca_switch_to_extended()
2925 kvm_s390_vcpu_block_all(kvm); in sca_switch_to_extended()
2926 write_lock(&kvm->arch.sca_lock); in sca_switch_to_extended()
2930 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) { in sca_switch_to_extended()
2935 kvm->arch.sca = new_sca; in sca_switch_to_extended()
2936 kvm->arch.use_esca = 1; in sca_switch_to_extended()
2938 write_unlock(&kvm->arch.sca_lock); in sca_switch_to_extended()
2939 kvm_s390_vcpu_unblock_all(kvm); in sca_switch_to_extended()
2943 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)", in sca_switch_to_extended()
2944 old_sca, kvm->arch.sca); in sca_switch_to_extended()
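sca_switch_to_extended() grows a VM from the basic to the extended SCA with a block-copy-publish sequence: block all vcpus, take sca_lock for writing, copy the old block's state into the larger one, point kvm->arch.sca (and each vcpu) at the new block, then unblock. Stripped of the SIE details, the publish step is an ordinary copy-and-swap; a sketch with invented types:

```c
#include <stdlib.h>
#include <string.h>

struct bsca { unsigned long ipte_control; unsigned char cpu[64]; };
struct esca { unsigned long ipte_control; unsigned char cpu[248]; };

static void *current_sca;	/* published pointer, read under a lock */

static int switch_to_extended(void)
{
	struct bsca *old = current_sca;
	struct esca *new = calloc(1, sizeof(*new));

	if (!new)
		return -1;
	/* ...vcpus blocked, write lock held... */
	new->ipte_control = old->ipte_control;
	memcpy(new->cpu, old->cpu, sizeof(old->cpu));
	current_sca = new;	/* publish, then unlock/unblock and free old */
	free(old);
	return 0;
}

int main(void)
{
	current_sca = calloc(1, sizeof(struct bsca));
	return current_sca ? switch_to_extended() : -1;
}
```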
2948 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id) in sca_can_add_vcpu() argument
2962 mutex_lock(&kvm->lock); in sca_can_add_vcpu()
2963 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm); in sca_can_add_vcpu()
2964 mutex_unlock(&kvm->lock); in sca_can_add_vcpu()
3079 mutex_lock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
3081 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; in kvm_arch_vcpu_postcreate()
3082 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx; in kvm_arch_vcpu_postcreate()
3084 mutex_unlock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
3085 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_postcreate()
3086 vcpu->arch.gmap = vcpu->kvm->arch.gmap; in kvm_arch_vcpu_postcreate()
3089 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0) in kvm_arch_vcpu_postcreate()
3095 static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr) in kvm_has_pckmo_subfunc() argument
3097 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) && in kvm_has_pckmo_subfunc()
3103 static bool kvm_has_pckmo_ecc(struct kvm *kvm) in kvm_has_pckmo_ecc() argument
3106 return kvm_has_pckmo_subfunc(kvm, 32) || in kvm_has_pckmo_ecc()
3107 kvm_has_pckmo_subfunc(kvm, 33) || in kvm_has_pckmo_ecc()
3108 kvm_has_pckmo_subfunc(kvm, 34) || in kvm_has_pckmo_ecc()
3109 kvm_has_pckmo_subfunc(kvm, 40) || in kvm_has_pckmo_ecc()
3110 kvm_has_pckmo_subfunc(kvm, 41); in kvm_has_pckmo_ecc()
3120 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76)) in kvm_s390_vcpu_crypto_setup()
3123 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; in kvm_s390_vcpu_crypto_setup()
3128 if (vcpu->kvm->arch.crypto.apie) in kvm_s390_vcpu_crypto_setup()
3132 if (vcpu->kvm->arch.crypto.aes_kw) { in kvm_s390_vcpu_crypto_setup()
3135 if (kvm_has_pckmo_ecc(vcpu->kvm)) in kvm_s390_vcpu_crypto_setup()
3139 if (vcpu->kvm->arch.crypto.dea_kw) in kvm_s390_vcpu_crypto_setup()
3159 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; in kvm_s390_vcpu_setup_model()
3162 if (test_kvm_facility(vcpu->kvm, 7)) in kvm_s390_vcpu_setup_model()
3175 if (test_kvm_facility(vcpu->kvm, 78)) in kvm_s390_vcpu_setup()
3177 else if (test_kvm_facility(vcpu->kvm, 8)) in kvm_s390_vcpu_setup()
3185 if (test_kvm_facility(vcpu->kvm, 9)) in kvm_s390_vcpu_setup()
3187 if (test_kvm_facility(vcpu->kvm, 73)) in kvm_s390_vcpu_setup()
3190 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi) in kvm_s390_vcpu_setup()
3192 if (test_kvm_facility(vcpu->kvm, 130)) in kvm_s390_vcpu_setup()
3203 if (test_kvm_facility(vcpu->kvm, 129)) { in kvm_s390_vcpu_setup()
3207 if (test_kvm_facility(vcpu->kvm, 139)) in kvm_s390_vcpu_setup()
3209 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_s390_vcpu_setup()
3225 if (vcpu->kvm->arch.use_cmma) { in kvm_s390_vcpu_setup()
3237 mutex_lock(&vcpu->kvm->lock); in kvm_s390_vcpu_setup()
3238 if (kvm_s390_pv_is_protected(vcpu->kvm)) { in kvm_s390_vcpu_setup()
3243 mutex_unlock(&vcpu->kvm->lock); in kvm_s390_vcpu_setup()
3248 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) in kvm_arch_vcpu_precreate() argument
3250 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id)) in kvm_arch_vcpu_precreate()
3274 vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin; in kvm_arch_vcpu_create()
3289 if (test_kvm_facility(vcpu->kvm, 64)) in kvm_arch_vcpu_create()
3291 if (test_kvm_facility(vcpu->kvm, 82)) in kvm_arch_vcpu_create()
3293 if (test_kvm_facility(vcpu->kvm, 133)) in kvm_arch_vcpu_create()
3295 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_arch_vcpu_create()
3305 if (kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_create()
3311 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", in kvm_arch_vcpu_create()
3321 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_create()
3330 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); in kvm_arch_vcpu_runnable()
3389 struct kvm *kvm = gmap->private; in kvm_gmap_notifier() local
3399 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_gmap_notifier()
3537 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) in kvm_arch_vcpu_ioctl_normal_reset()
3769 vcpu->kvm->arch.user_cpu_state_ctrl = 1; in kvm_arch_vcpu_ioctl_set_mpstate()
3866 if ((vcpu->kvm->arch.use_cmma) && in kvm_s390_handle_requests()
3867 (vcpu->kvm->mm->context.uses_cmm)) in kvm_s390_handle_requests()
3880 static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod) in __kvm_s390_set_tod_clock() argument
3890 kvm->arch.epoch = gtod->tod - htod.tod; in __kvm_s390_set_tod_clock()
3891 kvm->arch.epdx = 0; in __kvm_s390_set_tod_clock()
3892 if (test_kvm_facility(kvm, 139)) { in __kvm_s390_set_tod_clock()
3893 kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx; in __kvm_s390_set_tod_clock()
3894 if (kvm->arch.epoch > gtod->tod) in __kvm_s390_set_tod_clock()
3895 kvm->arch.epdx -= 1; in __kvm_s390_set_tod_clock()
3898 kvm_s390_vcpu_block_all(kvm); in __kvm_s390_set_tod_clock()
3899 kvm_for_each_vcpu(i, vcpu, kvm) { in __kvm_s390_set_tod_clock()
3900 vcpu->arch.sie_block->epoch = kvm->arch.epoch; in __kvm_s390_set_tod_clock()
3901 vcpu->arch.sie_block->epdx = kvm->arch.epdx; in __kvm_s390_set_tod_clock()
3904 kvm_s390_vcpu_unblock_all(kvm); in __kvm_s390_set_tod_clock()
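__kvm_s390_set_tod_clock() computes the new epoch as gtod->tod - htod.tod and then borrows from the epoch index when the subtraction wrapped (lines 3894-3895): with unsigned arithmetic, a borrow occurred exactly when the difference is larger than the minuend. A quick demonstration (not kernel code):

```c
#include <assert.h>
#include <stdint.h>

/* returns a - b, setting *borrow when the 64-bit subtraction wrapped */
static uint64_t sub_borrow(uint64_t a, uint64_t b, int *borrow)
{
	uint64_t diff = a - b;

	*borrow = diff > a;	/* wrap <=> result exceeds the minuend */
	return diff;
}

int main(void)
{
	int borrow;

	assert(sub_borrow(5, 3, &borrow) == 2 && !borrow);
	sub_borrow(3, 5, &borrow);	/* wraps: borrow from epdx */
	assert(borrow);
	return 0;
}
```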
3908 int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod) in kvm_s390_try_set_tod_clock() argument
3910 if (!mutex_trylock(&kvm->lock)) in kvm_s390_try_set_tod_clock()
3912 __kvm_s390_set_tod_clock(kvm, gtod); in kvm_s390_try_set_tod_clock()
3913 mutex_unlock(&kvm->lock); in kvm_s390_try_set_tod_clock()
3946 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); in __kvm_inject_pfault_token()
4000 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr)); in kvm_arch_setup_async_pf()
4025 if (!kvm_is_ucontrol(vcpu->kvm)) { in vcpu_pre_run()
4040 clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.gisa_int.kicked_mask); in vcpu_pre_run()
4123 } else if (kvm_is_ucontrol(vcpu->kvm)) { in vcpu_post_run()
4149 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in __vcpu_run()
4156 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in __vcpu_run()
4191 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in __vcpu_run()
4196 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in __vcpu_run()
4231 test_kvm_facility(vcpu->kvm, 64) && in sync_regs_fmt2()
4242 test_kvm_facility(vcpu->kvm, 133) && in sync_regs_fmt2()
4251 test_kvm_facility(vcpu->kvm, 82)) { in sync_regs_fmt2()
4398 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { in kvm_arch_vcpu_ioctl_run()
4517 static void __disable_ibs_on_all_vcpus(struct kvm *kvm) in __disable_ibs_on_all_vcpus() argument
4522 kvm_for_each_vcpu(i, vcpu, kvm) { in __disable_ibs_on_all_vcpus()
4544 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
4545 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_start()
4551 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
4557 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) in kvm_s390_vcpu_start()
4570 __disable_ibs_on_all_vcpus(vcpu->kvm); in kvm_s390_vcpu_start()
4586 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
4600 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
4601 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_stop()
4607 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
4624 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) { in kvm_s390_vcpu_stop()
4626 started_vcpu = vcpu->kvm->vcpus[i]; in kvm_s390_vcpu_stop()
4638 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
4652 if (!vcpu->kvm->arch.css_support) { in kvm_vcpu_ioctl_enable_cap()
4653 vcpu->kvm->arch.css_support = 1; in kvm_vcpu_ioctl_enable_cap()
4654 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support"); in kvm_vcpu_ioctl_enable_cap()
4655 trace_kvm_s390_enable_css(vcpu->kvm); in kvm_vcpu_ioctl_enable_cap()
4759 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_s390_guest_memsida_op()
4775 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvm_s390_guest_memsida_op()
4820 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
4822 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
4888 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
4905 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
4983 && (kvm_is_ucontrol(vcpu->kvm))) { in kvm_arch_vcpu_fault()
4993 int kvm_arch_prepare_memory_region(struct kvm *kvm, in kvm_arch_prepare_memory_region() argument
5009 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit) in kvm_arch_prepare_memory_region()
5013 if (kvm_s390_pv_get_handle(kvm)) in kvm_arch_prepare_memory_region()
5018 void kvm_arch_commit_memory_region(struct kvm *kvm, in kvm_arch_commit_memory_region() argument
5028 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
5032 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
5038 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, in kvm_arch_commit_memory_region()