Lines Matching +full:ipa +full:- +full:clock +full:- +full:query
1 // SPDX-License-Identifier: GPL-2.0
14 #define KMSG_COMPONENT "kvm-s390"
36 #include <asm/asm-offsets.h>
48 #include "kvm-s390.h"
53 #include "trace-s390.h"
201 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
222 /* available subfunctions indicated via query / "test bit" */
233 /* every s390 is virtualization enabled ;-) */ in kvm_arch_hardware_enable()
253 * -delta to the epoch. in kvm_clock_sync_scb()
255 delta = -delta; in kvm_clock_sync_scb()
257 /* sign-extension - we're adding to signed values below */ in kvm_clock_sync_scb()
259 delta_idx = -1; in kvm_clock_sync_scb()
261 scb->epoch += delta; in kvm_clock_sync_scb()
262 if (scb->ecd & ECD_MEF) { in kvm_clock_sync_scb()
263 scb->epdx += delta_idx; in kvm_clock_sync_scb()
264 if (scb->epoch < delta) in kvm_clock_sync_scb()
265 scb->epdx += 1; in kvm_clock_sync_scb()
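
A minimal standalone sketch of the arithmetic the matched lines above implement: a signed 64-bit delta is folded into the 128-bit (epdx:epoch) pair by sign-extending it into the high word and detecting the carry out of the low word. Names here are illustrative, not the kernel's:

    #include <stdint.h>

    struct tod_epoch {
        uint64_t idx;   /* high 64 bits (epoch index) */
        uint64_t epoch; /* low 64 bits */
    };

    static void epoch_add(struct tod_epoch *e, int64_t delta)
    {
        uint64_t d = (uint64_t)delta;
        uint64_t d_idx = delta < 0 ? ~0ULL : 0; /* sign extension */

        e->epoch += d;
        e->idx += d_idx;
        if (e->epoch < d)   /* carry out of the low 64 bits */
            e->idx += 1;
    }
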
285 kvm_clock_sync_scb(vcpu->arch.sie_block, *delta); in kvm_clock_sync()
287 kvm->arch.epoch = vcpu->arch.sie_block->epoch; in kvm_clock_sync()
288 kvm->arch.epdx = vcpu->arch.sie_block->epdx; in kvm_clock_sync()
290 if (vcpu->arch.cputm_enabled) in kvm_clock_sync()
291 vcpu->arch.cputm_start += *delta; in kvm_clock_sync()
292 if (vcpu->arch.vsie_block) in kvm_clock_sync()
293 kvm_clock_sync_scb(vcpu->arch.vsie_block, in kvm_clock_sync()
345 static __always_inline void __insn32_query(unsigned int opcode, u8 *query) in __insn32_query() argument
349 " lgr 1,%[query]\n" in __insn32_query()
353 : [query] "d" ((unsigned long)query), [opc] "i" (opcode) in __insn32_query()
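
__insn32_query executes the given opcode with function code 0 in GR0 so that the instruction stores its multi-byte capability bitmap at the address held in GR1; "available subfunctions indicated via query / test bit" then reduces to a bit test. A sketch of that test, assuming the usual s390 MSB-first bit numbering (not code from this file):

    static int query_bit_set(const unsigned char *query, unsigned int nr)
    {
        /* MSB-first numbering: bit 0 is the 0x80 bit of byte 0 */
        return (query[nr >> 3] >> (7 - (nr & 7))) & 1;
    }
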
369 if (test_facility(28)) /* TOD-clock steering */ in kvm_s390_cpu_feat_init()
465 int rc = -ENOMEM; in kvm_arch_init()
467 kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long)); in kvm_arch_init()
469 return -ENOMEM; in kvm_arch_init()
471 kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long)); in kvm_arch_init()
512 return -EINVAL; in kvm_arch_dev_ioctl()
597 struct gmap *gmap = kvm->arch.gmap; in kvm_arch_sync_dirty_log()
601 cur_gfn = memslot->base_gfn; in kvm_arch_sync_dirty_log()
602 last_gfn = memslot->base_gfn + memslot->npages; in kvm_arch_sync_dirty_log()
637 return -EINVAL; in kvm_vm_ioctl_get_dirty_log()
639 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
641 r = -EINVAL; in kvm_vm_ioctl_get_dirty_log()
642 if (log->slot >= KVM_USER_MEM_SLOTS) in kvm_vm_ioctl_get_dirty_log()
652 memset(memslot->dirty_bitmap, 0, n); in kvm_vm_ioctl_get_dirty_log()
656 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
674 if (cap->flags) in kvm_vm_ioctl_enable_cap()
675 return -EINVAL; in kvm_vm_ioctl_enable_cap()
677 switch (cap->cap) { in kvm_vm_ioctl_enable_cap()
680 kvm->arch.use_irqchip = 1; in kvm_vm_ioctl_enable_cap()
685 kvm->arch.user_sigp = 1; in kvm_vm_ioctl_enable_cap()
689 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
690 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
691 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
693 set_kvm_facility(kvm->arch.model.fac_mask, 129); in kvm_vm_ioctl_enable_cap()
694 set_kvm_facility(kvm->arch.model.fac_list, 129); in kvm_vm_ioctl_enable_cap()
696 set_kvm_facility(kvm->arch.model.fac_mask, 134); in kvm_vm_ioctl_enable_cap()
697 set_kvm_facility(kvm->arch.model.fac_list, 134); in kvm_vm_ioctl_enable_cap()
700 set_kvm_facility(kvm->arch.model.fac_mask, 135); in kvm_vm_ioctl_enable_cap()
701 set_kvm_facility(kvm->arch.model.fac_list, 135); in kvm_vm_ioctl_enable_cap()
704 set_kvm_facility(kvm->arch.model.fac_mask, 148); in kvm_vm_ioctl_enable_cap()
705 set_kvm_facility(kvm->arch.model.fac_list, 148); in kvm_vm_ioctl_enable_cap()
708 set_kvm_facility(kvm->arch.model.fac_mask, 152); in kvm_vm_ioctl_enable_cap()
709 set_kvm_facility(kvm->arch.model.fac_list, 152); in kvm_vm_ioctl_enable_cap()
713 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
714 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
719 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
720 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
721 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
722 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
724 set_kvm_facility(kvm->arch.model.fac_mask, 64); in kvm_vm_ioctl_enable_cap()
725 set_kvm_facility(kvm->arch.model.fac_list, 64); in kvm_vm_ioctl_enable_cap()
728 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
733 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
734 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
735 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
737 set_kvm_facility(kvm->arch.model.fac_mask, 72); in kvm_vm_ioctl_enable_cap()
738 set_kvm_facility(kvm->arch.model.fac_list, 72); in kvm_vm_ioctl_enable_cap()
741 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
746 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
747 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
748 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
749 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
751 set_kvm_facility(kvm->arch.model.fac_mask, 133); in kvm_vm_ioctl_enable_cap()
752 set_kvm_facility(kvm->arch.model.fac_list, 133); in kvm_vm_ioctl_enable_cap()
755 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
760 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
761 if (kvm->created_vcpus) in kvm_vm_ioctl_enable_cap()
762 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
763 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_enable_cap()
764 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
767 mmap_write_lock(kvm->mm); in kvm_vm_ioctl_enable_cap()
768 kvm->mm->context.allow_gmap_hpage_1m = 1; in kvm_vm_ioctl_enable_cap()
769 mmap_write_unlock(kvm->mm); in kvm_vm_ioctl_enable_cap()
775 kvm->arch.use_skf = 0; in kvm_vm_ioctl_enable_cap()
776 kvm->arch.use_pfmfi = 0; in kvm_vm_ioctl_enable_cap()
778 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
784 kvm->arch.user_stsi = 1; in kvm_vm_ioctl_enable_cap()
789 kvm->arch.user_instr0 = 1; in kvm_vm_ioctl_enable_cap()
794 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
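
The repeated set_kvm_facility(fac_mask/fac_list, nr) calls above turn on one STFLE facility bit in both the host mask and the guest-visible list. A sketch of the underlying bit set, assumed from the s390 convention that facility bits are numbered MSB-first within each byte:

    static void set_facility_bit(unsigned char *fac, unsigned long nr)
    {
        fac[nr >> 3] |= 0x80 >> (nr & 7);   /* bit 0 = MSB of byte 0 */
    }
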
804 switch (attr->attr) { in kvm_s390_get_mem_control()
807 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes", in kvm_s390_get_mem_control()
808 kvm->arch.mem_limit); in kvm_s390_get_mem_control()
809 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr)) in kvm_s390_get_mem_control()
810 ret = -EFAULT; in kvm_s390_get_mem_control()
813 ret = -ENXIO; in kvm_s390_get_mem_control()
823 switch (attr->attr) { in kvm_s390_set_mem_control()
825 ret = -ENXIO; in kvm_s390_set_mem_control()
830 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
831 if (kvm->created_vcpus) in kvm_s390_set_mem_control()
832 ret = -EBUSY; in kvm_s390_set_mem_control()
833 else if (kvm->mm->context.allow_gmap_hpage_1m) in kvm_s390_set_mem_control()
834 ret = -EINVAL; in kvm_s390_set_mem_control()
836 kvm->arch.use_cmma = 1; in kvm_s390_set_mem_control()
838 kvm->arch.use_pfmfi = 0; in kvm_s390_set_mem_control()
841 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
844 ret = -ENXIO; in kvm_s390_set_mem_control()
847 ret = -EINVAL; in kvm_s390_set_mem_control()
848 if (!kvm->arch.use_cmma) in kvm_s390_set_mem_control()
852 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
853 idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_mem_control()
854 s390_reset_cmma(kvm->arch.gmap->mm); in kvm_s390_set_mem_control()
855 srcu_read_unlock(&kvm->srcu, idx); in kvm_s390_set_mem_control()
856 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
863 return -EINVAL; in kvm_s390_set_mem_control()
865 if (get_user(new_limit, (u64 __user *)attr->addr)) in kvm_s390_set_mem_control()
866 return -EFAULT; in kvm_s390_set_mem_control()
868 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT && in kvm_s390_set_mem_control()
869 new_limit > kvm->arch.mem_limit) in kvm_s390_set_mem_control()
870 return -E2BIG; in kvm_s390_set_mem_control()
873 return -EINVAL; in kvm_s390_set_mem_control()
877 new_limit -= 1; in kvm_s390_set_mem_control()
879 ret = -EBUSY; in kvm_s390_set_mem_control()
880 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
881 if (!kvm->created_vcpus) { in kvm_s390_set_mem_control()
883 struct gmap *new = gmap_create(current->mm, new_limit); in kvm_s390_set_mem_control()
886 ret = -ENOMEM; in kvm_s390_set_mem_control()
888 gmap_remove(kvm->arch.gmap); in kvm_s390_set_mem_control()
889 new->private = kvm; in kvm_s390_set_mem_control()
890 kvm->arch.gmap = new; in kvm_s390_set_mem_control()
894 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
897 (void *) kvm->arch.gmap->asce); in kvm_s390_set_mem_control()
901 ret = -ENXIO; in kvm_s390_set_mem_control()
927 mutex_lock(&kvm->lock); in kvm_s390_vm_set_crypto()
928 switch (attr->attr) { in kvm_s390_vm_set_crypto()
931 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
932 return -EINVAL; in kvm_s390_vm_set_crypto()
935 kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_vm_set_crypto()
936 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
937 kvm->arch.crypto.aes_kw = 1; in kvm_s390_vm_set_crypto()
942 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
943 return -EINVAL; in kvm_s390_vm_set_crypto()
946 kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_vm_set_crypto()
947 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
948 kvm->arch.crypto.dea_kw = 1; in kvm_s390_vm_set_crypto()
953 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
954 return -EINVAL; in kvm_s390_vm_set_crypto()
956 kvm->arch.crypto.aes_kw = 0; in kvm_s390_vm_set_crypto()
957 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
958 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
963 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
964 return -EINVAL; in kvm_s390_vm_set_crypto()
966 kvm->arch.crypto.dea_kw = 0; in kvm_s390_vm_set_crypto()
967 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
968 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
973 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
974 return -EOPNOTSUPP; in kvm_s390_vm_set_crypto()
976 kvm->arch.crypto.apie = 1; in kvm_s390_vm_set_crypto()
980 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
981 return -EOPNOTSUPP; in kvm_s390_vm_set_crypto()
983 kvm->arch.crypto.apie = 0; in kvm_s390_vm_set_crypto()
986 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
987 return -ENXIO; in kvm_s390_vm_set_crypto()
991 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1005 * Must be called with kvm->srcu held to avoid races on memslots, and with
1006 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
1016 if (kvm->arch.migration_mode) in kvm_s390_vm_start_migration()
1019 if (!slots || !slots->used_slots) in kvm_s390_vm_start_migration()
1020 return -EINVAL; in kvm_s390_vm_start_migration()
1022 if (!kvm->arch.use_cmma) { in kvm_s390_vm_start_migration()
1023 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1027 for (slotnr = 0; slotnr < slots->used_slots; slotnr++) { in kvm_s390_vm_start_migration()
1028 ms = slots->memslots + slotnr; in kvm_s390_vm_start_migration()
1029 if (!ms->dirty_bitmap) in kvm_s390_vm_start_migration()
1030 return -EINVAL; in kvm_s390_vm_start_migration()
1038 ram_pages += ms->npages; in kvm_s390_vm_start_migration()
1040 atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages); in kvm_s390_vm_start_migration()
1041 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1047 * Must be called with kvm->slots_lock to avoid races with ourselves and
1053 if (!kvm->arch.migration_mode) in kvm_s390_vm_stop_migration()
1055 kvm->arch.migration_mode = 0; in kvm_s390_vm_stop_migration()
1056 if (kvm->arch.use_cmma) in kvm_s390_vm_stop_migration()
1064 int res = -ENXIO; in kvm_s390_vm_set_migration()
1066 mutex_lock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
1067 switch (attr->attr) { in kvm_s390_vm_set_migration()
1077 mutex_unlock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
1085 u64 mig = kvm->arch.migration_mode; in kvm_s390_vm_get_migration()
1087 if (attr->attr != KVM_S390_VM_MIGRATION_STATUS) in kvm_s390_vm_get_migration()
1088 return -ENXIO; in kvm_s390_vm_get_migration()
1090 if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig))) in kvm_s390_vm_get_migration()
1091 return -EFAULT; in kvm_s390_vm_get_migration()
1101 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod))) in kvm_s390_set_tod_ext()

1102 return -EFAULT; in kvm_s390_set_tod_ext()
1105 return -EINVAL; in kvm_s390_set_tod_ext()
1118 if (copy_from_user(&gtod_high, (void __user *)attr->addr, in kvm_s390_set_tod_high()
1120 return -EFAULT; in kvm_s390_set_tod_high()
1123 return -EINVAL; in kvm_s390_set_tod_high()
1133 if (copy_from_user(&gtod.tod, (void __user *)attr->addr, in kvm_s390_set_tod_low()
1135 return -EFAULT; in kvm_s390_set_tod_low()
1146 if (attr->flags) in kvm_s390_set_tod()
1147 return -EINVAL; in kvm_s390_set_tod()
1149 mutex_lock(&kvm->lock); in kvm_s390_set_tod()
1155 ret = -EOPNOTSUPP; in kvm_s390_set_tod()
1159 switch (attr->attr) { in kvm_s390_set_tod()
1170 ret = -ENXIO; in kvm_s390_set_tod()
1175 mutex_unlock(&kvm->lock); in kvm_s390_set_tod()
1188 gtod->tod = htod.tod + kvm->arch.epoch; in kvm_s390_get_tod_clock()
1189 gtod->epoch_idx = 0; in kvm_s390_get_tod_clock()
1191 gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx; in kvm_s390_get_tod_clock()
1192 if (gtod->tod < htod.tod) in kvm_s390_get_tod_clock()
1193 gtod->epoch_idx += 1; in kvm_s390_get_tod_clock()
1205 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) in kvm_s390_get_tod_ext()
1206 return -EFAULT; in kvm_s390_get_tod_ext()
1208 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx", in kvm_s390_get_tod_ext()
1217 if (copy_to_user((void __user *)attr->addr, &gtod_high, in kvm_s390_get_tod_high()
1219 return -EFAULT; in kvm_s390_get_tod_high()
1220 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high); in kvm_s390_get_tod_high()
1230 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) in kvm_s390_get_tod_low()
1231 return -EFAULT; in kvm_s390_get_tod_low()
1232 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod); in kvm_s390_get_tod_low()
1241 if (attr->flags) in kvm_s390_get_tod()
1242 return -EINVAL; in kvm_s390_get_tod()
1244 switch (attr->attr) { in kvm_s390_get_tod()
1255 ret = -ENXIO; in kvm_s390_get_tod()
1267 mutex_lock(&kvm->lock); in kvm_s390_set_processor()
1268 if (kvm->created_vcpus) { in kvm_s390_set_processor()
1269 ret = -EBUSY; in kvm_s390_set_processor()
1274 ret = -ENOMEM; in kvm_s390_set_processor()
1277 if (!copy_from_user(proc, (void __user *)attr->addr, in kvm_s390_set_processor()
1279 kvm->arch.model.cpuid = proc->cpuid; in kvm_s390_set_processor()
1282 if (lowest_ibc && proc->ibc) { in kvm_s390_set_processor()
1283 if (proc->ibc > unblocked_ibc) in kvm_s390_set_processor()
1284 kvm->arch.model.ibc = unblocked_ibc; in kvm_s390_set_processor()
1285 else if (proc->ibc < lowest_ibc) in kvm_s390_set_processor()
1286 kvm->arch.model.ibc = lowest_ibc; in kvm_s390_set_processor()
1288 kvm->arch.model.ibc = proc->ibc; in kvm_s390_set_processor()
1290 memcpy(kvm->arch.model.fac_list, proc->fac_list, in kvm_s390_set_processor()
1293 kvm->arch.model.ibc, in kvm_s390_set_processor()
1294 kvm->arch.model.cpuid); in kvm_s390_set_processor()
1296 kvm->arch.model.fac_list[0], in kvm_s390_set_processor()
1297 kvm->arch.model.fac_list[1], in kvm_s390_set_processor()
1298 kvm->arch.model.fac_list[2]); in kvm_s390_set_processor()
1300 ret = -EFAULT; in kvm_s390_set_processor()
1303 mutex_unlock(&kvm->lock); in kvm_s390_set_processor()
1312 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data))) in kvm_s390_set_processor_feat()
1313 return -EFAULT; in kvm_s390_set_processor_feat()
1317 return -EINVAL; in kvm_s390_set_processor_feat()
1319 mutex_lock(&kvm->lock); in kvm_s390_set_processor_feat()
1320 if (kvm->created_vcpus) { in kvm_s390_set_processor_feat()
1321 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1322 return -EBUSY; in kvm_s390_set_processor_feat()
1324 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat, in kvm_s390_set_processor_feat()
1326 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1337 mutex_lock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1338 if (kvm->created_vcpus) { in kvm_s390_set_processor_subfunc()
1339 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1340 return -EBUSY; in kvm_s390_set_processor_subfunc()
1343 if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr, in kvm_s390_set_processor_subfunc()
1345 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1346 return -EFAULT; in kvm_s390_set_processor_subfunc()
1348 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1351 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_set_processor_subfunc()
1352 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_set_processor_subfunc()
1353 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_set_processor_subfunc()
1354 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_set_processor_subfunc()
1356 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_set_processor_subfunc()
1357 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_set_processor_subfunc()
1359 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_set_processor_subfunc()
1360 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_set_processor_subfunc()
1362 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_set_processor_subfunc()
1363 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_set_processor_subfunc()
1365 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_set_processor_subfunc()
1366 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_set_processor_subfunc()
1368 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_set_processor_subfunc()
1369 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_set_processor_subfunc()
1371 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_set_processor_subfunc()
1372 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_set_processor_subfunc()
1374 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_set_processor_subfunc()
1375 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_set_processor_subfunc()
1377 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_set_processor_subfunc()
1378 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_set_processor_subfunc()
1380 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_set_processor_subfunc()
1381 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_set_processor_subfunc()
1383 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_set_processor_subfunc()
1384 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_set_processor_subfunc()
1386 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_set_processor_subfunc()
1387 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_set_processor_subfunc()
1389 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_set_processor_subfunc()
1390 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_set_processor_subfunc()
1392 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_set_processor_subfunc()
1393 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_set_processor_subfunc()
1395 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_set_processor_subfunc()
1396 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_set_processor_subfunc()
1398 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_set_processor_subfunc()
1399 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_set_processor_subfunc()
1400 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_set_processor_subfunc()
1401 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_set_processor_subfunc()
1403 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_set_processor_subfunc()
1404 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_set_processor_subfunc()
1405 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_set_processor_subfunc()
1406 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_set_processor_subfunc()
1413 int ret = -ENXIO; in kvm_s390_set_cpu_model()
1415 switch (attr->attr) { in kvm_s390_set_cpu_model()
1436 ret = -ENOMEM; in kvm_s390_get_processor()
1439 proc->cpuid = kvm->arch.model.cpuid; in kvm_s390_get_processor()
1440 proc->ibc = kvm->arch.model.ibc; in kvm_s390_get_processor()
1441 memcpy(&proc->fac_list, kvm->arch.model.fac_list, in kvm_s390_get_processor()
1444 kvm->arch.model.ibc, in kvm_s390_get_processor()
1445 kvm->arch.model.cpuid); in kvm_s390_get_processor()
1447 kvm->arch.model.fac_list[0], in kvm_s390_get_processor()
1448 kvm->arch.model.fac_list[1], in kvm_s390_get_processor()
1449 kvm->arch.model.fac_list[2]); in kvm_s390_get_processor()
1450 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc))) in kvm_s390_get_processor()
1451 ret = -EFAULT; in kvm_s390_get_processor()
1464 ret = -ENOMEM; in kvm_s390_get_machine()
1467 get_cpu_id((struct cpuid *) &mach->cpuid); in kvm_s390_get_machine()
1468 mach->ibc = sclp.ibc; in kvm_s390_get_machine()
1469 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask, in kvm_s390_get_machine()
1471 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list, in kvm_s390_get_machine()
1474 kvm->arch.model.ibc, in kvm_s390_get_machine()
1475 kvm->arch.model.cpuid); in kvm_s390_get_machine()
1477 mach->fac_mask[0], in kvm_s390_get_machine()
1478 mach->fac_mask[1], in kvm_s390_get_machine()
1479 mach->fac_mask[2]); in kvm_s390_get_machine()
1481 mach->fac_list[0], in kvm_s390_get_machine()
1482 mach->fac_list[1], in kvm_s390_get_machine()
1483 mach->fac_list[2]); in kvm_s390_get_machine()
1484 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) in kvm_s390_get_machine()
1485 ret = -EFAULT; in kvm_s390_get_machine()
1496 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat, in kvm_s390_get_processor_feat()
1498 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data))) in kvm_s390_get_processor_feat()
1499 return -EFAULT; in kvm_s390_get_processor_feat()
1515 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data))) in kvm_s390_get_machine_feat()
1516 return -EFAULT; in kvm_s390_get_machine_feat()
1527 if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs, in kvm_s390_get_processor_subfunc()
1529 return -EFAULT; in kvm_s390_get_processor_subfunc()
1532 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_get_processor_subfunc()
1533 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_get_processor_subfunc()
1534 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_get_processor_subfunc()
1535 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_get_processor_subfunc()
1537 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_get_processor_subfunc()
1538 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_get_processor_subfunc()
1540 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_get_processor_subfunc()
1541 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_get_processor_subfunc()
1543 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_get_processor_subfunc()
1544 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_get_processor_subfunc()
1546 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_get_processor_subfunc()
1547 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_get_processor_subfunc()
1549 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_get_processor_subfunc()
1550 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_get_processor_subfunc()
1552 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_get_processor_subfunc()
1553 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_get_processor_subfunc()
1555 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_get_processor_subfunc()
1556 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_get_processor_subfunc()
1558 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_get_processor_subfunc()
1559 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_get_processor_subfunc()
1561 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_get_processor_subfunc()
1562 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_get_processor_subfunc()
1564 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_get_processor_subfunc()
1565 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_get_processor_subfunc()
1567 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_get_processor_subfunc()
1568 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_get_processor_subfunc()
1570 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_get_processor_subfunc()
1571 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_get_processor_subfunc()
1573 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_get_processor_subfunc()
1574 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_get_processor_subfunc()
1576 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_get_processor_subfunc()
1577 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_get_processor_subfunc()
1579 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_get_processor_subfunc()
1580 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_get_processor_subfunc()
1581 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_get_processor_subfunc()
1582 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_get_processor_subfunc()
1584 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_get_processor_subfunc()
1585 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_get_processor_subfunc()
1586 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_get_processor_subfunc()
1587 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_get_processor_subfunc()
1595 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc, in kvm_s390_get_machine_subfunc()
1597 return -EFAULT; in kvm_s390_get_machine_subfunc()
1662 int ret = -ENXIO; in kvm_s390_get_cpu_model()
1664 switch (attr->attr) { in kvm_s390_get_cpu_model()
1691 switch (attr->group) { in kvm_s390_vm_set_attr()
1708 ret = -ENXIO; in kvm_s390_vm_set_attr()
1719 switch (attr->group) { in kvm_s390_vm_get_attr()
1733 ret = -ENXIO; in kvm_s390_vm_get_attr()
1744 switch (attr->group) { in kvm_s390_vm_has_attr()
1746 switch (attr->attr) { in kvm_s390_vm_has_attr()
1749 ret = sclp.has_cmma ? 0 : -ENXIO; in kvm_s390_vm_has_attr()
1755 ret = -ENXIO; in kvm_s390_vm_has_attr()
1760 switch (attr->attr) { in kvm_s390_vm_has_attr()
1766 ret = -ENXIO; in kvm_s390_vm_has_attr()
1771 switch (attr->attr) { in kvm_s390_vm_has_attr()
1781 ret = -ENXIO; in kvm_s390_vm_has_attr()
1786 switch (attr->attr) { in kvm_s390_vm_has_attr()
1795 ret = ap_instructions_available() ? 0 : -ENXIO; in kvm_s390_vm_has_attr()
1798 ret = -ENXIO; in kvm_s390_vm_has_attr()
1806 ret = -ENXIO; in kvm_s390_vm_has_attr()
1819 if (args->flags != 0) in kvm_s390_get_skeys()
1820 return -EINVAL; in kvm_s390_get_skeys()
1823 if (!mm_uses_skeys(current->mm)) in kvm_s390_get_skeys()
1827 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) in kvm_s390_get_skeys()
1828 return -EINVAL; in kvm_s390_get_skeys()
1830 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL); in kvm_s390_get_skeys()
1832 return -ENOMEM; in kvm_s390_get_skeys()
1834 mmap_read_lock(current->mm); in kvm_s390_get_skeys()
1835 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_skeys()
1836 for (i = 0; i < args->count; i++) { in kvm_s390_get_skeys()
1837 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_get_skeys()
1839 r = -EFAULT; in kvm_s390_get_skeys()
1843 r = get_guest_storage_key(current->mm, hva, &keys[i]); in kvm_s390_get_skeys()
1847 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_skeys()
1848 mmap_read_unlock(current->mm); in kvm_s390_get_skeys()
1851 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys, in kvm_s390_get_skeys()
1852 sizeof(uint8_t) * args->count); in kvm_s390_get_skeys()
1854 r = -EFAULT; in kvm_s390_get_skeys()
1868 if (args->flags != 0) in kvm_s390_set_skeys()
1869 return -EINVAL; in kvm_s390_set_skeys()
1872 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) in kvm_s390_set_skeys()
1873 return -EINVAL; in kvm_s390_set_skeys()
1875 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL); in kvm_s390_set_skeys()
1877 return -ENOMEM; in kvm_s390_set_skeys()
1879 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr, in kvm_s390_set_skeys()
1880 sizeof(uint8_t) * args->count); in kvm_s390_set_skeys()
1882 r = -EFAULT; in kvm_s390_set_skeys()
1892 mmap_read_lock(current->mm); in kvm_s390_set_skeys()
1893 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_skeys()
1894 while (i < args->count) { in kvm_s390_set_skeys()
1896 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_skeys()
1898 r = -EFAULT; in kvm_s390_set_skeys()
1904 r = -EINVAL; in kvm_s390_set_skeys()
1908 r = set_guest_storage_key(current->mm, hva, keys[i], 0); in kvm_s390_set_skeys()
1910 r = fixup_user_fault(current->mm, hva, in kvm_s390_set_skeys()
1918 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_skeys()
1919 mmap_read_unlock(current->mm); in kvm_s390_set_skeys()
1941 int start = 0, end = slots->used_slots; in gfn_to_memslot_approx()
1942 int slot = atomic_read(&slots->lru_slot); in gfn_to_memslot_approx()
1943 struct kvm_memory_slot *memslots = slots->memslots; in gfn_to_memslot_approx()
1950 slot = start + (end - start) / 2; in gfn_to_memslot_approx()
1958 if (start >= slots->used_slots) in gfn_to_memslot_approx()
1959 return slots->used_slots - 1; in gfn_to_memslot_approx()
1963 atomic_set(&slots->lru_slot, start); in gfn_to_memslot_approx()
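
gfn_to_memslot_approx binary-searches the memslot array, which this kernel keeps sorted by base_gfn in decreasing order, and unlike the exact lookup it falls back to the nearest slot rather than failing. A self-contained sketch of the search with illustrative names, assuming only the descending sort visible in the surrounding code:

    struct slot { unsigned long base_gfn, npages; };

    /* first index with base_gfn <= gfn; clamped to the last slot */
    static unsigned int slot_approx(const struct slot *s, unsigned int n,
                                    unsigned long gfn)
    {
        unsigned int start = 0, end = n, mid;

        while (start < end) {
            mid = start + (end - start) / 2;
            if (gfn >= s[mid].base_gfn)
                end = mid;      /* candidate, keep searching left */
            else
                start = mid + 1;
        }
        return start >= n ? n - 1 : start;
    }
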
1972 unsigned long pgstev, hva, cur_gfn = args->start_gfn; in kvm_s390_peek_cmma()
1974 args->count = 0; in kvm_s390_peek_cmma()
1975 while (args->count < bufsize) { in kvm_s390_peek_cmma()
1982 return args->count ? 0 : -EFAULT; in kvm_s390_peek_cmma()
1983 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_peek_cmma()
1985 res[args->count++] = (pgstev >> 24) & 0x43; in kvm_s390_peek_cmma()
1996 struct kvm_memory_slot *ms = slots->memslots + slotidx; in kvm_s390_next_dirty_cmma()
1997 unsigned long ofs = cur_gfn - ms->base_gfn; in kvm_s390_next_dirty_cmma()
1999 if (ms->base_gfn + ms->npages <= cur_gfn) { in kvm_s390_next_dirty_cmma()
2000 slotidx--; in kvm_s390_next_dirty_cmma()
2003 slotidx = slots->used_slots - 1; in kvm_s390_next_dirty_cmma()
2005 ms = slots->memslots + slotidx; in kvm_s390_next_dirty_cmma()
2008 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs); in kvm_s390_next_dirty_cmma()
2009 while ((slotidx > 0) && (ofs >= ms->npages)) { in kvm_s390_next_dirty_cmma()
2010 slotidx--; in kvm_s390_next_dirty_cmma()
2011 ms = slots->memslots + slotidx; in kvm_s390_next_dirty_cmma()
2012 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0); in kvm_s390_next_dirty_cmma()
2014 return ms->base_gfn + ofs; in kvm_s390_next_dirty_cmma()
2024 if (unlikely(!slots->used_slots)) in kvm_s390_get_cmma()
2027 cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn); in kvm_s390_get_cmma()
2029 args->count = 0; in kvm_s390_get_cmma()
2030 args->start_gfn = cur_gfn; in kvm_s390_get_cmma()
2034 mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages; in kvm_s390_get_cmma()
2036 while (args->count < bufsize) { in kvm_s390_get_cmma()
2041 if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms))) in kvm_s390_get_cmma()
2042 atomic64_dec(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma()
2043 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_get_cmma()
2046 res[args->count++] = (pgstev >> 24) & 0x43; in kvm_s390_get_cmma()
2055 (next_gfn - args->start_gfn >= bufsize)) in kvm_s390_get_cmma()
2059 if (cur_gfn - ms->base_gfn >= ms->npages) { in kvm_s390_get_cmma()
2083 if (!kvm->arch.use_cmma) in kvm_s390_get_cmma_bits()
2084 return -ENXIO; in kvm_s390_get_cmma_bits()
2086 if (args->flags & ~KVM_S390_CMMA_PEEK) in kvm_s390_get_cmma_bits()
2087 return -EINVAL; in kvm_s390_get_cmma_bits()
2088 /* Migration mode query, and we are not doing a migration */ in kvm_s390_get_cmma_bits()
2089 peek = !!(args->flags & KVM_S390_CMMA_PEEK); in kvm_s390_get_cmma_bits()
2090 if (!peek && !kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2091 return -EINVAL; in kvm_s390_get_cmma_bits()
2093 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX); in kvm_s390_get_cmma_bits()
2094 if (!bufsize || !kvm->mm->context.uses_cmm) { in kvm_s390_get_cmma_bits()
2099 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) { in kvm_s390_get_cmma_bits()
2106 return -ENOMEM; in kvm_s390_get_cmma_bits()
2108 mmap_read_lock(kvm->mm); in kvm_s390_get_cmma_bits()
2109 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_cmma_bits()
2114 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_cmma_bits()
2115 mmap_read_unlock(kvm->mm); in kvm_s390_get_cmma_bits()
2117 if (kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2118 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma_bits()
2120 args->remaining = 0; in kvm_s390_get_cmma_bits()
2122 if (copy_to_user((void __user *)args->values, values, args->count)) in kvm_s390_get_cmma_bits()
2123 ret = -EFAULT; in kvm_s390_get_cmma_bits()
2132 * set and the mm->context.uses_cmm flag is set.
2141 mask = args->mask; in kvm_s390_set_cmma_bits()
2143 if (!kvm->arch.use_cmma) in kvm_s390_set_cmma_bits()
2144 return -ENXIO; in kvm_s390_set_cmma_bits()
2146 if (args->flags != 0) in kvm_s390_set_cmma_bits()
2147 return -EINVAL; in kvm_s390_set_cmma_bits()
2149 if (args->count > KVM_S390_CMMA_SIZE_MAX) in kvm_s390_set_cmma_bits()
2150 return -EINVAL; in kvm_s390_set_cmma_bits()
2152 if (args->count == 0) in kvm_s390_set_cmma_bits()
2155 bits = vmalloc(array_size(sizeof(*bits), args->count)); in kvm_s390_set_cmma_bits()
2157 return -ENOMEM; in kvm_s390_set_cmma_bits()
2159 r = copy_from_user(bits, (void __user *)args->values, args->count); in kvm_s390_set_cmma_bits()
2161 r = -EFAULT; in kvm_s390_set_cmma_bits()
2165 mmap_read_lock(kvm->mm); in kvm_s390_set_cmma_bits()
2166 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_cmma_bits()
2167 for (i = 0; i < args->count; i++) { in kvm_s390_set_cmma_bits()
2168 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_cmma_bits()
2170 r = -EFAULT; in kvm_s390_set_cmma_bits()
2177 set_pgste_bits(kvm->mm, hva, mask, pgstev); in kvm_s390_set_cmma_bits()
2179 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_cmma_bits()
2180 mmap_read_unlock(kvm->mm); in kvm_s390_set_cmma_bits()
2182 if (!kvm->mm->context.uses_cmm) { in kvm_s390_set_cmma_bits()
2183 mmap_write_lock(kvm->mm); in kvm_s390_set_cmma_bits()
2184 kvm->mm->context.uses_cmm = 1; in kvm_s390_set_cmma_bits()
2185 mmap_write_unlock(kvm->mm); in kvm_s390_set_cmma_bits()
2208 mutex_lock(&vcpu->mutex); in kvm_s390_cpus_from_pv()
2212 ret = -EIO; in kvm_s390_cpus_from_pv()
2214 mutex_unlock(&vcpu->mutex); in kvm_s390_cpus_from_pv()
2227 mutex_lock(&vcpu->mutex); in kvm_s390_cpus_to_pv()
2229 mutex_unlock(&vcpu->mutex); in kvm_s390_cpus_to_pv()
2242 void __user *argp = (void __user *)cmd->data; in kvm_s390_handle_pv()
2244 switch (cmd->cmd) { in kvm_s390_handle_pv()
2246 r = -EINVAL; in kvm_s390_handle_pv()
2258 mmap_write_lock(current->mm); in kvm_s390_handle_pv()
2260 mmap_write_unlock(current->mm); in kvm_s390_handle_pv()
2264 r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2268 r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2273 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2277 r = -EINVAL; in kvm_s390_handle_pv()
2281 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2289 r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2292 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2299 r = -EINVAL; in kvm_s390_handle_pv()
2303 r = -EFAULT; in kvm_s390_handle_pv()
2308 r = -EINVAL; in kvm_s390_handle_pv()
2312 r = -ENOMEM; in kvm_s390_handle_pv()
2317 r = -EFAULT; in kvm_s390_handle_pv()
2321 &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2329 r = -EINVAL; in kvm_s390_handle_pv()
2330 if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm)) in kvm_s390_handle_pv()
2333 r = -EFAULT; in kvm_s390_handle_pv()
2338 &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2342 r = -EINVAL; in kvm_s390_handle_pv()
2347 UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2348 KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc, in kvm_s390_handle_pv()
2349 cmd->rrc); in kvm_s390_handle_pv()
2353 r = -EINVAL; in kvm_s390_handle_pv()
2358 UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2360 cmd->rc, cmd->rrc); in kvm_s390_handle_pv()
2364 r = -EINVAL; in kvm_s390_handle_pv()
2369 UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2371 cmd->rc, cmd->rrc); in kvm_s390_handle_pv()
2375 r = -ENOTTY; in kvm_s390_handle_pv()
2383 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
2392 r = -EFAULT; in kvm_arch_vm_ioctl()
2401 r = -EINVAL; in kvm_arch_vm_ioctl()
2402 if (kvm->arch.use_irqchip) { in kvm_arch_vm_ioctl()
2410 r = -EFAULT; in kvm_arch_vm_ioctl()
2417 r = -EFAULT; in kvm_arch_vm_ioctl()
2424 r = -EFAULT; in kvm_arch_vm_ioctl()
2433 r = -EFAULT; in kvm_arch_vm_ioctl()
2443 r = -EFAULT; in kvm_arch_vm_ioctl()
2453 r = -EFAULT; in kvm_arch_vm_ioctl()
2456 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
2458 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
2462 r = -EFAULT; in kvm_arch_vm_ioctl()
2469 r = -EFAULT; in kvm_arch_vm_ioctl()
2472 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
2474 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
2481 kvm->arch.user_cpu_state_ctrl = 1; in kvm_arch_vm_ioctl()
2484 r = -EINVAL; in kvm_arch_vm_ioctl()
2488 r = -EFAULT; in kvm_arch_vm_ioctl()
2492 r = -EINVAL; in kvm_arch_vm_ioctl()
2495 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
2497 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
2499 r = -EFAULT; in kvm_arch_vm_ioctl()
2505 r = -ENOTTY; in kvm_arch_vm_ioctl()
2533 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb; in kvm_s390_set_crycb_format()
2535 /* Clear the CRYCB format bits - i.e., set format 0 by default */ in kvm_s390_set_crycb_format()
2536 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK); in kvm_s390_set_crycb_format()
2543 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; in kvm_s390_set_crycb_format()
2545 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; in kvm_s390_set_crycb_format()
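
The CRYCB designation (crycbd) packs a format field into the low bits of the aligned crycb address, which is why the code above first clears CRYCB_FORMAT_MASK ("format 0 by default") and then ORs in format 1 or 2. A condensed sketch of the pattern; the MSAX3/APXA conditions are assumed from context rather than shown in the matches:

    crycbd = (u32)(unsigned long)crycb; /* aligned address, low bits free */
    crycbd &= ~CRYCB_FORMAT_MASK;       /* format 0 by default */
    if (msax3_available) {
        if (apxa_available)
            crycbd |= CRYCB_FORMAT2;    /* extended APCB */
        else
            crycbd |= CRYCB_FORMAT1;
    }
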
2551 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb; in kvm_arch_crypto_set_masks()
2553 mutex_lock(&kvm->lock); in kvm_arch_crypto_set_masks()
2556 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) { in kvm_arch_crypto_set_masks()
2558 memcpy(crycb->apcb1.apm, apm, 32); in kvm_arch_crypto_set_masks()
2561 memcpy(crycb->apcb1.aqm, aqm, 32); in kvm_arch_crypto_set_masks()
2564 memcpy(crycb->apcb1.adm, adm, 32); in kvm_arch_crypto_set_masks()
2570 memcpy(crycb->apcb0.apm, apm, 8); in kvm_arch_crypto_set_masks()
2571 memcpy(crycb->apcb0.aqm, aqm, 2); in kvm_arch_crypto_set_masks()
2572 memcpy(crycb->apcb0.adm, adm, 2); in kvm_arch_crypto_set_masks()
2584 mutex_unlock(&kvm->lock); in kvm_arch_crypto_set_masks()
2590 mutex_lock(&kvm->lock); in kvm_arch_crypto_clear_masks()
2593 memset(&kvm->arch.crypto.crycb->apcb0, 0, in kvm_arch_crypto_clear_masks()
2594 sizeof(kvm->arch.crypto.crycb->apcb0)); in kvm_arch_crypto_clear_masks()
2595 memset(&kvm->arch.crypto.crycb->apcb1, 0, in kvm_arch_crypto_clear_masks()
2596 sizeof(kvm->arch.crypto.crycb->apcb1)); in kvm_arch_crypto_clear_masks()
2602 mutex_unlock(&kvm->lock); in kvm_arch_crypto_clear_masks()
2617 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb; in kvm_s390_crypto_init()
2624 kvm->arch.crypto.aes_kw = 1; in kvm_s390_crypto_init()
2625 kvm->arch.crypto.dea_kw = 1; in kvm_s390_crypto_init()
2626 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_crypto_init()
2627 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_crypto_init()
2628 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_crypto_init()
2629 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_crypto_init()
2634 if (kvm->arch.use_esca) in sca_dispose()
2635 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block)); in sca_dispose()
2637 free_page((unsigned long)(kvm->arch.sca)); in sca_dispose()
2638 kvm->arch.sca = NULL; in sca_dispose()
2648 rc = -EINVAL; in kvm_arch_init_vm()
2663 rc = -ENOMEM; in kvm_arch_init_vm()
2667 rwlock_init(&kvm->arch.sca_lock); in kvm_arch_init_vm()
2669 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags); in kvm_arch_init_vm()
2670 if (!kvm->arch.sca) in kvm_arch_init_vm()
2676 kvm->arch.sca = (struct bsca_block *) in kvm_arch_init_vm()
2677 ((char *) kvm->arch.sca + sca_offset); in kvm_arch_init_vm()
2680 sprintf(debug_name, "kvm-%u", current->pid); in kvm_arch_init_vm()
2682 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long)); in kvm_arch_init_vm()
2683 if (!kvm->arch.dbf) in kvm_arch_init_vm()
2687 kvm->arch.sie_page2 = in kvm_arch_init_vm()
2689 if (!kvm->arch.sie_page2) in kvm_arch_init_vm()
2692 kvm->arch.sie_page2->kvm = kvm; in kvm_arch_init_vm()
2693 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list; in kvm_arch_init_vm()
2696 kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] & in kvm_arch_init_vm()
2699 kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] & in kvm_arch_init_vm()
2702 kvm->arch.model.subfuncs = kvm_s390_available_subfunc; in kvm_arch_init_vm()
2704 /* we are always in czam mode - even on pre z14 machines */ in kvm_arch_init_vm()
2705 set_kvm_facility(kvm->arch.model.fac_mask, 138); in kvm_arch_init_vm()
2706 set_kvm_facility(kvm->arch.model.fac_list, 138); in kvm_arch_init_vm()
2708 set_kvm_facility(kvm->arch.model.fac_mask, 74); in kvm_arch_init_vm()
2709 set_kvm_facility(kvm->arch.model.fac_list, 74); in kvm_arch_init_vm()
2711 set_kvm_facility(kvm->arch.model.fac_mask, 147); in kvm_arch_init_vm()
2712 set_kvm_facility(kvm->arch.model.fac_list, 147); in kvm_arch_init_vm()
2716 set_kvm_facility(kvm->arch.model.fac_mask, 65); in kvm_arch_init_vm()
2718 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid(); in kvm_arch_init_vm()
2719 kvm->arch.model.ibc = sclp.ibc & 0x0fff; in kvm_arch_init_vm()
2723 mutex_init(&kvm->arch.float_int.ais_lock); in kvm_arch_init_vm()
2724 spin_lock_init(&kvm->arch.float_int.lock); in kvm_arch_init_vm()
2726 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); in kvm_arch_init_vm()
2727 init_waitqueue_head(&kvm->arch.ipte_wq); in kvm_arch_init_vm()
2728 mutex_init(&kvm->arch.ipte_mutex); in kvm_arch_init_vm()
2730 debug_register_view(kvm->arch.dbf, &debug_sprintf_view); in kvm_arch_init_vm()
2734 kvm->arch.gmap = NULL; in kvm_arch_init_vm()
2735 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT; in kvm_arch_init_vm()
2738 kvm->arch.mem_limit = TASK_SIZE_MAX; in kvm_arch_init_vm()
2740 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX, in kvm_arch_init_vm()
2742 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1); in kvm_arch_init_vm()
2743 if (!kvm->arch.gmap) in kvm_arch_init_vm()
2745 kvm->arch.gmap->private = kvm; in kvm_arch_init_vm()
2746 kvm->arch.gmap->pfault_enabled = 0; in kvm_arch_init_vm()
2749 kvm->arch.use_pfmfi = sclp.has_pfmfi; in kvm_arch_init_vm()
2750 kvm->arch.use_skf = sclp.has_skey; in kvm_arch_init_vm()
2751 spin_lock_init(&kvm->arch.start_stop_lock); in kvm_arch_init_vm()
2755 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid); in kvm_arch_init_vm()
2759 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_init_vm()
2760 debug_unregister(kvm->arch.dbf); in kvm_arch_init_vm()
2771 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id); in kvm_arch_vcpu_destroy()
2774 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
2777 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
2778 gmap_remove(vcpu->arch.gmap); in kvm_arch_vcpu_destroy()
2780 if (vcpu->kvm->arch.use_cmma) in kvm_arch_vcpu_destroy()
2785 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_destroy()
2796 mutex_lock(&kvm->lock); in kvm_free_vcpus()
2797 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) in kvm_free_vcpus()
2798 kvm->vcpus[i] = NULL; in kvm_free_vcpus()
2800 atomic_set(&kvm->online_vcpus, 0); in kvm_free_vcpus()
2801 mutex_unlock(&kvm->lock); in kvm_free_vcpus()
2812 * We are already at the end of life and kvm->lock is not taken. in kvm_arch_destroy_vm()
2819 debug_unregister(kvm->arch.dbf); in kvm_arch_destroy_vm()
2820 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_destroy_vm()
2822 gmap_remove(kvm->arch.gmap); in kvm_arch_destroy_vm()
2832 vcpu->arch.gmap = gmap_create(current->mm, -1UL); in __kvm_ucontrol_vcpu_init()
2833 if (!vcpu->arch.gmap) in __kvm_ucontrol_vcpu_init()
2834 return -ENOMEM; in __kvm_ucontrol_vcpu_init()
2835 vcpu->arch.gmap->private = vcpu->kvm; in __kvm_ucontrol_vcpu_init()
2844 read_lock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
2845 if (vcpu->kvm->arch.use_esca) { in sca_del_vcpu()
2846 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
2848 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn); in sca_del_vcpu()
2849 sca->cpu[vcpu->vcpu_id].sda = 0; in sca_del_vcpu()
2851 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
2853 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn); in sca_del_vcpu()
2854 sca->cpu[vcpu->vcpu_id].sda = 0; in sca_del_vcpu()
2856 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
2862 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
2865 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); in sca_add_vcpu()
2866 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca; in sca_add_vcpu()
2869 read_lock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
2870 if (vcpu->kvm->arch.use_esca) { in sca_add_vcpu()
2871 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
2873 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block; in sca_add_vcpu()
2874 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); in sca_add_vcpu()
2875 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU; in sca_add_vcpu()
2876 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; in sca_add_vcpu()
2877 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn); in sca_add_vcpu()
2879 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
2881 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block; in sca_add_vcpu()
2882 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); in sca_add_vcpu()
2883 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca; in sca_add_vcpu()
2884 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn); in sca_add_vcpu()
2886 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
2892 d->sda = s->sda; in sca_copy_entry()
2893 d->sigp_ctrl.c = s->sigp_ctrl.c; in sca_copy_entry()
2894 d->sigp_ctrl.scn = s->sigp_ctrl.scn; in sca_copy_entry()
2901 d->ipte_control = s->ipte_control; in sca_copy_b_to_e()
2902 d->mcn[0] = s->mcn; in sca_copy_b_to_e()
2904 sca_copy_entry(&d->cpu[i], &s->cpu[i]); in sca_copy_b_to_e()
2909 struct bsca_block *old_sca = kvm->arch.sca; in sca_switch_to_extended()
2915 if (kvm->arch.use_esca) in sca_switch_to_extended()
2920 return -ENOMEM; in sca_switch_to_extended()
2926 write_lock(&kvm->arch.sca_lock); in sca_switch_to_extended()
2931 vcpu->arch.sie_block->scaoh = scaoh; in sca_switch_to_extended()
2932 vcpu->arch.sie_block->scaol = scaol; in sca_switch_to_extended()
2933 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; in sca_switch_to_extended()
2935 kvm->arch.sca = new_sca; in sca_switch_to_extended()
2936 kvm->arch.use_esca = 1; in sca_switch_to_extended()
2938 write_unlock(&kvm->arch.sca_lock); in sca_switch_to_extended()
2943 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)", in sca_switch_to_extended()
2944 old_sca, kvm->arch.sca); in sca_switch_to_extended()
2962 mutex_lock(&kvm->lock); in sca_can_add_vcpu()
2963 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm); in sca_can_add_vcpu()
2964 mutex_unlock(&kvm->lock); in sca_can_add_vcpu()
2972 WARN_ON_ONCE(vcpu->arch.cputm_start != 0); in __start_cpu_timer_accounting()
2973 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in __start_cpu_timer_accounting()
2974 vcpu->arch.cputm_start = get_tod_clock_fast(); in __start_cpu_timer_accounting()
2975 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in __start_cpu_timer_accounting()
2981 WARN_ON_ONCE(vcpu->arch.cputm_start == 0); in __stop_cpu_timer_accounting()
2982 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in __stop_cpu_timer_accounting()
2983 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start; in __stop_cpu_timer_accounting()
2984 vcpu->arch.cputm_start = 0; in __stop_cpu_timer_accounting()
2985 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in __stop_cpu_timer_accounting()
2991 WARN_ON_ONCE(vcpu->arch.cputm_enabled); in __enable_cpu_timer_accounting()
2992 vcpu->arch.cputm_enabled = true; in __enable_cpu_timer_accounting()
2999 WARN_ON_ONCE(!vcpu->arch.cputm_enabled); in __disable_cpu_timer_accounting()
3001 vcpu->arch.cputm_enabled = false; in __disable_cpu_timer_accounting()
3018 /* set the cpu timer - may only be called from the VCPU thread itself */
3022 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in kvm_s390_set_cpu_timer()
3023 if (vcpu->arch.cputm_enabled) in kvm_s390_set_cpu_timer()
3024 vcpu->arch.cputm_start = get_tod_clock_fast(); in kvm_s390_set_cpu_timer()
3025 vcpu->arch.sie_block->cputm = cputm; in kvm_s390_set_cpu_timer()
3026 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in kvm_s390_set_cpu_timer()
3030 /* update and get the cpu timer - can also be called from other VCPU threads */
3036 if (unlikely(!vcpu->arch.cputm_enabled)) in kvm_s390_get_cpu_timer()
3037 return vcpu->arch.sie_block->cputm; in kvm_s390_get_cpu_timer()
3041 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount); in kvm_s390_get_cpu_timer()
3046 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu); in kvm_s390_get_cpu_timer()
3047 value = vcpu->arch.sie_block->cputm; in kvm_s390_get_cpu_timer()
3049 if (likely(vcpu->arch.cputm_start)) in kvm_s390_get_cpu_timer()
3050 value -= get_tod_clock_fast() - vcpu->arch.cputm_start; in kvm_s390_get_cpu_timer()
3051 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1)); in kvm_s390_get_cpu_timer()
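
The writer side (__start/__stop_cpu_timer_accounting and kvm_s390_set_cpu_timer above) brackets every update with raw_write_seqcount_begin/end, so the count is odd while the (cputm, cputm_start) pair is inconsistent; the reader rounds the count down to even and retries until it observes the same value twice. A condensed, kernel-flavored sketch of that lockless reader (barriers and the owning-VCPU special case elided):

    do {
        seq = READ_ONCE(timer->seq) & ~1u;  /* round down to even */
        value = timer->cputm;
        if (timer->cputm_start)             /* timer currently running */
            value -= now() - timer->cputm_start;
    } while (READ_ONCE(timer->seq) != seq); /* retry if a writer interfered */
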
3059 gmap_enable(vcpu->arch.enabled_gmap); in kvm_arch_vcpu_load()
3061 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) in kvm_arch_vcpu_load()
3063 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
3068 vcpu->cpu = -1; in kvm_arch_vcpu_put()
3069 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) in kvm_arch_vcpu_put()
3072 vcpu->arch.enabled_gmap = gmap_get_enabled(); in kvm_arch_vcpu_put()
3073 gmap_disable(vcpu->arch.enabled_gmap); in kvm_arch_vcpu_put()
3079 mutex_lock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
3081 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; in kvm_arch_vcpu_postcreate()
3082 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx; in kvm_arch_vcpu_postcreate()
3084 mutex_unlock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
3085 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_postcreate()
3086 vcpu->arch.gmap = vcpu->kvm->arch.gmap; in kvm_arch_vcpu_postcreate()
3089 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0) in kvm_arch_vcpu_postcreate()
3090 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC; in kvm_arch_vcpu_postcreate()
3092 vcpu->arch.enabled_gmap = vcpu->arch.gmap; in kvm_arch_vcpu_postcreate()
3097 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) && in kvm_has_pckmo_subfunc()
3120 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76)) in kvm_s390_vcpu_crypto_setup()
3123 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; in kvm_s390_vcpu_crypto_setup()
3124 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA); in kvm_s390_vcpu_crypto_setup()
3125 vcpu->arch.sie_block->eca &= ~ECA_APIE; in kvm_s390_vcpu_crypto_setup()
3126 vcpu->arch.sie_block->ecd &= ~ECD_ECC; in kvm_s390_vcpu_crypto_setup()
3128 if (vcpu->kvm->arch.crypto.apie) in kvm_s390_vcpu_crypto_setup()
3129 vcpu->arch.sie_block->eca |= ECA_APIE; in kvm_s390_vcpu_crypto_setup()
3132 if (vcpu->kvm->arch.crypto.aes_kw) { in kvm_s390_vcpu_crypto_setup()
3133 vcpu->arch.sie_block->ecb3 |= ECB3_AES; in kvm_s390_vcpu_crypto_setup()
3135 if (kvm_has_pckmo_ecc(vcpu->kvm)) in kvm_s390_vcpu_crypto_setup()
3136 vcpu->arch.sie_block->ecd |= ECD_ECC; in kvm_s390_vcpu_crypto_setup()
3139 if (vcpu->kvm->arch.crypto.dea_kw) in kvm_s390_vcpu_crypto_setup()
3140 vcpu->arch.sie_block->ecb3 |= ECB3_DEA; in kvm_s390_vcpu_crypto_setup()
3145 free_page(vcpu->arch.sie_block->cbrlo); in kvm_s390_vcpu_unsetup_cmma()
3146 vcpu->arch.sie_block->cbrlo = 0; in kvm_s390_vcpu_unsetup_cmma()
3151 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL); in kvm_s390_vcpu_setup_cmma()
3152 if (!vcpu->arch.sie_block->cbrlo) in kvm_s390_vcpu_setup_cmma()
3153 return -ENOMEM; in kvm_s390_vcpu_setup_cmma()
3159 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; in kvm_s390_vcpu_setup_model()
3161 vcpu->arch.sie_block->ibc = model->ibc; in kvm_s390_vcpu_setup_model()
3162 if (test_kvm_facility(vcpu->kvm, 7)) in kvm_s390_vcpu_setup_model()
3163 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list; in kvm_s390_vcpu_setup_model()
3171 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | in kvm_s390_vcpu_setup()
3175 if (test_kvm_facility(vcpu->kvm, 78)) in kvm_s390_vcpu_setup()
3177 else if (test_kvm_facility(vcpu->kvm, 8)) in kvm_s390_vcpu_setup()
3184 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT; in kvm_s390_vcpu_setup()
3185 if (test_kvm_facility(vcpu->kvm, 9)) in kvm_s390_vcpu_setup()
3186 vcpu->arch.sie_block->ecb |= ECB_SRSI; in kvm_s390_vcpu_setup()
3187 if (test_kvm_facility(vcpu->kvm, 73)) in kvm_s390_vcpu_setup()
3188 vcpu->arch.sie_block->ecb |= ECB_TE; in kvm_s390_vcpu_setup()
3190 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi) in kvm_s390_vcpu_setup()
3191 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI; in kvm_s390_vcpu_setup()
3192 if (test_kvm_facility(vcpu->kvm, 130)) in kvm_s390_vcpu_setup()
3193 vcpu->arch.sie_block->ecb2 |= ECB2_IEP; in kvm_s390_vcpu_setup()
3194 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI; in kvm_s390_vcpu_setup()
3196 vcpu->arch.sie_block->eca |= ECA_CEI; in kvm_s390_vcpu_setup()
3198 vcpu->arch.sie_block->eca |= ECA_IB; in kvm_s390_vcpu_setup()
3200 vcpu->arch.sie_block->eca |= ECA_SII; in kvm_s390_vcpu_setup()
3202 vcpu->arch.sie_block->eca |= ECA_SIGPI; in kvm_s390_vcpu_setup()
3203 if (test_kvm_facility(vcpu->kvm, 129)) { in kvm_s390_vcpu_setup()
3204 vcpu->arch.sie_block->eca |= ECA_VX; in kvm_s390_vcpu_setup()
3205 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; in kvm_s390_vcpu_setup()
3207 if (test_kvm_facility(vcpu->kvm, 139)) in kvm_s390_vcpu_setup()
3208 vcpu->arch.sie_block->ecd |= ECD_MEF; in kvm_s390_vcpu_setup()
3209 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_s390_vcpu_setup()
3210 vcpu->arch.sie_block->ecd |= ECD_ETOKENF; in kvm_s390_vcpu_setup()
3211 if (vcpu->arch.sie_block->gd) { in kvm_s390_vcpu_setup()
3212 vcpu->arch.sie_block->eca |= ECA_AIV; in kvm_s390_vcpu_setup()
3213 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u", in kvm_s390_vcpu_setup()
3214 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id); in kvm_s390_vcpu_setup()
3216 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx) in kvm_s390_vcpu_setup()
3218 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb; in kvm_s390_vcpu_setup()
3223 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; in kvm_s390_vcpu_setup()
3225 if (vcpu->kvm->arch.use_cmma) { in kvm_s390_vcpu_setup()
3230 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in kvm_s390_vcpu_setup()
3231 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; in kvm_s390_vcpu_setup()
3233 vcpu->arch.sie_block->hpid = HPID_KVM; in kvm_s390_vcpu_setup()
3237 mutex_lock(&vcpu->kvm->lock); in kvm_s390_vcpu_setup()
3238 if (kvm_s390_pv_is_protected(vcpu->kvm)) { in kvm_s390_vcpu_setup()
3243 mutex_unlock(&vcpu->kvm->lock); in kvm_s390_vcpu_setup()
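
/*
 * Illustrative sketch, not part of this file: kvm_s390_vcpu_setup() above
 * repeats one pattern, "if the configured CPU model has facility X, set
 * interpretation-control bit Y in the SIE block". The helper below
 * (ecb_set_if() is a hypothetical name) captures that pattern for two of
 * the checks shown; it is a refactoring sketch, not the code this file
 * actually uses.
 */
static inline void ecb_set_if(struct kvm_vcpu *vcpu, int fac, u8 *field, u8 bit)
{
	if (test_kvm_facility(vcpu->kvm, fac))
		*field |= bit;		/* facility present in mask and list */
}

static void setup_ecb_bits_sketch(struct kvm_vcpu *vcpu)
{
	ecb_set_if(vcpu, 9, &vcpu->arch.sie_block->ecb, ECB_SRSI);
	ecb_set_if(vcpu, 73, &vcpu->arch.sie_block->ecb, ECB_TE);
}
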
3251 return -EINVAL; in kvm_arch_vcpu_precreate()
3263 return -ENOMEM; in kvm_arch_vcpu_create()
3265 vcpu->arch.sie_block = &sie_page->sie_block; in kvm_arch_vcpu_create()
3266 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; in kvm_arch_vcpu_create()
3269 vcpu->arch.sie_block->mso = 0; in kvm_arch_vcpu_create()
3270 vcpu->arch.sie_block->msl = sclp.hamax; in kvm_arch_vcpu_create()
3272 vcpu->arch.sie_block->icpua = vcpu->vcpu_id; in kvm_arch_vcpu_create()
3273 spin_lock_init(&vcpu->arch.local_int.lock); in kvm_arch_vcpu_create()
3274 vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin; in kvm_arch_vcpu_create()
3275 if (vcpu->arch.sie_block->gd && sclp.has_gisaf) in kvm_arch_vcpu_create()
3276 vcpu->arch.sie_block->gd |= GISA_FORMAT1; in kvm_arch_vcpu_create()
3277 seqcount_init(&vcpu->arch.cputm_seqcount); in kvm_arch_vcpu_create()
3279 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_arch_vcpu_create()
3281 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX | in kvm_arch_vcpu_create()
3289 if (test_kvm_facility(vcpu->kvm, 64)) in kvm_arch_vcpu_create()
3290 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB; in kvm_arch_vcpu_create()
3291 if (test_kvm_facility(vcpu->kvm, 82)) in kvm_arch_vcpu_create()
3292 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC; in kvm_arch_vcpu_create()
3293 if (test_kvm_facility(vcpu->kvm, 133)) in kvm_arch_vcpu_create()
3294 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB; in kvm_arch_vcpu_create()
3295 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_arch_vcpu_create()
3296 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN; in kvm_arch_vcpu_create()
3301 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS; in kvm_arch_vcpu_create()
3303 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS; in kvm_arch_vcpu_create()
3305 if (kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_create()
3311 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", in kvm_arch_vcpu_create()
3312 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block); in kvm_arch_vcpu_create()
3313 trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block); in kvm_arch_vcpu_create()
3321 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_create()
3322 gmap_remove(vcpu->arch.gmap); in kvm_arch_vcpu_create()
3324 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_create()
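
/*
 * Userspace view of kvm_arch_vcpu_create(): a minimal sketch (assuming a
 * Linux host with /dev/kvm, all error handling elided) of the generic KVM
 * ioctl sequence that ends up in the create path above and maps the
 * kvm_run area consumed by sync_regs()/store_regs() further down.
 */
#include <stddef.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static struct kvm_run *create_vcpu0(int *vcpu_fdp)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);	/* vcpu id 0 */
	long size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);

	*vcpu_fdp = vcpu_fd;
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    vcpu_fd, 0);
}
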
3330 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); in kvm_arch_vcpu_runnable()
3336 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE); in kvm_arch_vcpu_in_kernel()
3341 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_block()
3347 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_unblock()
3352 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_request()
3358 return atomic_read(&vcpu->arch.sie_block->prog20) & in kvm_s390_vcpu_sie_inhibited()
3364 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_request_handled()
3375 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) in exit_sie()
3389 struct kvm *kvm = gmap->private; in kvm_gmap_notifier()
3402 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) { in kvm_gmap_notifier()
3403 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx", in kvm_gmap_notifier()
3415 vcpu->stat.halt_no_poll_steal++; in kvm_arch_no_poll()
3431 int r = -EINVAL; in kvm_arch_vcpu_ioctl_get_one_reg()
3433 switch (reg->id) { in kvm_arch_vcpu_ioctl_get_one_reg()
3435 r = put_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_get_one_reg()
3436 (u32 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3439 r = put_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_get_one_reg()
3440 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3444 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3447 r = put_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_get_one_reg()
3448 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3451 r = put_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_get_one_reg()
3452 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3455 r = put_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_get_one_reg()
3456 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3459 r = put_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_get_one_reg()
3460 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3463 r = put_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_get_one_reg()
3464 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3467 r = put_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_get_one_reg()
3468 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3480 int r = -EINVAL; in kvm_arch_vcpu_ioctl_set_one_reg()
3483 switch (reg->id) { in kvm_arch_vcpu_ioctl_set_one_reg()
3485 r = get_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_set_one_reg()
3486 (u32 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
3489 r = get_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_set_one_reg()
3490 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
3493 r = get_user(val, (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
3498 r = get_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_set_one_reg()
3499 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
3502 r = get_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_set_one_reg()
3503 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
3504 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_vcpu_ioctl_set_one_reg()
3508 r = get_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_set_one_reg()
3509 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
3512 r = get_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_set_one_reg()
3513 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
3516 r = get_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_set_one_reg()
3517 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
3520 r = get_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_set_one_reg()
3521 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
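
/*
 * Userspace counterpart of the one-reg handlers above: a hedged sketch that
 * writes one of the registers they expose. struct kvm_one_reg and the
 * KVM_REG_S390_* ids are uapi; vcpu_fd is assumed to be an open vcpu fd.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_clock_comparator(int vcpu_fd, __u64 ckc)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_S390_CLOCK_COMP,
		.addr = (__u64)(unsigned long)&ckc,	/* get_user() source */
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}
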
3532 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI; in kvm_arch_vcpu_ioctl_normal_reset()
3533 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_arch_vcpu_ioctl_normal_reset()
3534 memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb)); in kvm_arch_vcpu_ioctl_normal_reset()
3537 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) in kvm_arch_vcpu_ioctl_normal_reset()
3551 vcpu->arch.sie_block->gpsw.mask = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3552 vcpu->arch.sie_block->gpsw.addr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3555 vcpu->arch.sie_block->ckc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3556 memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr)); in kvm_arch_vcpu_ioctl_initial_reset()
3557 vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
3558 vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
3561 memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs)); in kvm_arch_vcpu_ioctl_initial_reset()
3562 vcpu->run->s.regs.ckc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3563 vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
3564 vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
3565 vcpu->run->psw_addr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3566 vcpu->run->psw_mask = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3567 vcpu->run->s.regs.todpr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3568 vcpu->run->s.regs.cputm = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3569 vcpu->run->s.regs.ckc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3570 vcpu->run->s.regs.pp = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3571 vcpu->run->s.regs.gbea = 1; in kvm_arch_vcpu_ioctl_initial_reset()
3572 vcpu->run->s.regs.fpc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3579 vcpu->arch.sie_block->gbea = 1; in kvm_arch_vcpu_ioctl_initial_reset()
3580 vcpu->arch.sie_block->pp = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3581 vcpu->arch.sie_block->fpf &= ~FPF_BPBC; in kvm_arch_vcpu_ioctl_initial_reset()
3582 vcpu->arch.sie_block->todpr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3588 struct kvm_sync_regs *regs = &vcpu->run->s.regs; in kvm_arch_vcpu_ioctl_clear_reset()
3593 memset(&regs->gprs, 0, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_clear_reset()
3594 memset(&regs->vrs, 0, sizeof(regs->vrs)); in kvm_arch_vcpu_ioctl_clear_reset()
3595 memset(&regs->acrs, 0, sizeof(regs->acrs)); in kvm_arch_vcpu_ioctl_clear_reset()
3596 memset(&regs->gscb, 0, sizeof(regs->gscb)); in kvm_arch_vcpu_ioctl_clear_reset()
3598 regs->etoken = 0; in kvm_arch_vcpu_ioctl_clear_reset()
3599 regs->etoken_extension = 0; in kvm_arch_vcpu_ioctl_clear_reset()
3605 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_set_regs()
3613 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_get_regs()
3623 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs)); in kvm_arch_vcpu_ioctl_set_sregs()
3624 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_set_sregs()
3635 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs)); in kvm_arch_vcpu_ioctl_get_sregs()
3636 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_get_sregs()
3648 if (test_fp_ctl(fpu->fpc)) { in kvm_arch_vcpu_ioctl_set_fpu()
3649 ret = -EINVAL; in kvm_arch_vcpu_ioctl_set_fpu()
3652 vcpu->run->s.regs.fpc = fpu->fpc; in kvm_arch_vcpu_ioctl_set_fpu()
3654 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs, in kvm_arch_vcpu_ioctl_set_fpu()
3655 (freg_t *) fpu->fprs); in kvm_arch_vcpu_ioctl_set_fpu()
3657 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs)); in kvm_arch_vcpu_ioctl_set_fpu()
3671 convert_vx_to_fp((freg_t *) fpu->fprs, in kvm_arch_vcpu_ioctl_get_fpu()
3672 (__vector128 *) vcpu->run->s.regs.vrs); in kvm_arch_vcpu_ioctl_get_fpu()
3674 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs)); in kvm_arch_vcpu_ioctl_get_fpu()
3675 fpu->fpc = vcpu->run->s.regs.fpc; in kvm_arch_vcpu_ioctl_get_fpu()
3686 rc = -EBUSY; in kvm_arch_vcpu_ioctl_set_initial_psw()
3688 vcpu->run->psw_mask = psw.mask; in kvm_arch_vcpu_ioctl_set_initial_psw()
3689 vcpu->run->psw_addr = psw.addr; in kvm_arch_vcpu_ioctl_set_initial_psw()
3697 return -EINVAL; /* not implemented yet */ in kvm_arch_vcpu_ioctl_translate()
3711 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
3714 if (dbg->control & ~VALID_GUESTDBG_FLAGS) { in kvm_arch_vcpu_ioctl_set_guest_debug()
3715 rc = -EINVAL; in kvm_arch_vcpu_ioctl_set_guest_debug()
3719 rc = -EINVAL; in kvm_arch_vcpu_ioctl_set_guest_debug()
3723 if (dbg->control & KVM_GUESTDBG_ENABLE) { in kvm_arch_vcpu_ioctl_set_guest_debug()
3724 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
3728 if (dbg->control & KVM_GUESTDBG_USE_HW_BP) in kvm_arch_vcpu_ioctl_set_guest_debug()
3732 vcpu->arch.guestdbg.last_bp = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
3736 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
3768 /* user space knows about this interface - let it control the state */ in kvm_arch_vcpu_ioctl_set_mpstate()
3769 vcpu->kvm->arch.user_cpu_state_ctrl = 1; in kvm_arch_vcpu_ioctl_set_mpstate()
3771 switch (mp_state->mp_state) { in kvm_arch_vcpu_ioctl_set_mpstate()
3780 rc = -ENXIO; in kvm_arch_vcpu_ioctl_set_mpstate()
3788 rc = -ENXIO; in kvm_arch_vcpu_ioctl_set_mpstate()
3807 * We use MMU_RELOAD just to re-arm the ipte notifier for the in kvm_s390_handle_requests()
3815 rc = gmap_mprotect_notify(vcpu->arch.gmap, in kvm_s390_handle_requests()
3826 vcpu->arch.sie_block->ihcpu = 0xffff; in kvm_s390_handle_requests()
3832 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); in kvm_s390_handle_requests()
3840 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0); in kvm_s390_handle_requests()
3847 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC; in kvm_s390_handle_requests()
3857 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA; in kvm_s390_handle_requests()
3863 * Re-enable CMM virtualization if CMMA is available and in kvm_s390_handle_requests()
3866 if ((vcpu->kvm->arch.use_cmma) && in kvm_s390_handle_requests()
3867 (vcpu->kvm->mm->context.uses_cmm)) in kvm_s390_handle_requests()
3868 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA; in kvm_s390_handle_requests()
3890 kvm->arch.epoch = gtod->tod - htod.tod; in __kvm_s390_set_tod_clock()
3891 kvm->arch.epdx = 0; in __kvm_s390_set_tod_clock()
3893 kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx; in __kvm_s390_set_tod_clock()
3894 if (kvm->arch.epoch > gtod->tod) in __kvm_s390_set_tod_clock()
3895 kvm->arch.epdx -= 1; in __kvm_s390_set_tod_clock()
3900 vcpu->arch.sie_block->epoch = kvm->arch.epoch; in __kvm_s390_set_tod_clock()
3901 vcpu->arch.sie_block->epdx = kvm->arch.epdx; in __kvm_s390_set_tod_clock()
3910 if (!mutex_trylock(&kvm->lock)) in kvm_s390_try_set_tod_clock()
3913 mutex_unlock(&kvm->lock); in kvm_s390_try_set_tod_clock()
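
/*
 * The epoch arithmetic in __kvm_s390_set_tod_clock() is a 128-bit
 * subtraction done in two 64-bit halves: epoch = guest TOD - host TOD,
 * with a borrow propagated into the epoch index when the low half wraps.
 * A standalone sketch of the same borrow rule (function and parameter
 * names are illustrative):
 */
#include <linux/types.h>

static void tod_epoch_delta(__u64 g_tod, __u8 g_idx, __u64 h_tod, __u8 h_idx,
			    __u64 *epoch, __u8 *epdx)
{
	*epoch = g_tod - h_tod;		/* may wrap around zero */
	*epdx = g_idx - h_idx;
	if (*epoch > g_tod)		/* low half wrapped: borrow */
		*epdx -= 1;
}
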
3918 * kvm_arch_fault_in_page - fault-in guest page if necessary
3923 * Make sure that a guest page has been faulted-in on the host.
3929 return gmap_fault(vcpu->arch.gmap, gpa, in kvm_arch_fault_in_page()
3946 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); in __kvm_inject_pfault_token()
3953 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token); in kvm_arch_async_page_not_present()
3954 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token); in kvm_arch_async_page_not_present()
3962 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token); in kvm_arch_async_page_present()
3963 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token); in kvm_arch_async_page_present()
3986 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_setup_async_pf()
3988 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) != in kvm_arch_setup_async_pf()
3989 vcpu->arch.pfault_compare) in kvm_arch_setup_async_pf()
3995 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) in kvm_arch_setup_async_pf()
3997 if (!vcpu->arch.gmap->pfault_enabled) in kvm_arch_setup_async_pf()
4000 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr)); in kvm_arch_setup_async_pf()
4001 hva += current->thread.gmap_addr & ~PAGE_MASK; in kvm_arch_setup_async_pf()
4002 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) in kvm_arch_setup_async_pf()
4005 return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); in kvm_arch_setup_async_pf()
4019 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14]; in vcpu_pre_run()
4020 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15]; in vcpu_pre_run()
4025 if (!kvm_is_ucontrol(vcpu->kvm)) { in vcpu_pre_run()
4040 clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.gisa_int.kicked_mask); in vcpu_pre_run()
4042 vcpu->arch.sie_block->icptcode = 0; in vcpu_pre_run()
4043 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); in vcpu_pre_run()
4069 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1); in vcpu_post_run_fault_in_sie()
4074 /* Instruction-Fetching Exceptions - we can't detect the ilen. in vcpu_post_run_fault_in_sie()
4078 pgm_info = vcpu->arch.pgm; in vcpu_post_run_fault_in_sie()
4092 vcpu->arch.sie_block->icptcode); in vcpu_post_run()
4093 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); in vcpu_post_run()
4098 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14; in vcpu_post_run()
4099 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15; in vcpu_post_run()
4101 if (exit_reason == -EINTR) { in vcpu_post_run()
4103 sie_page = container_of(vcpu->arch.sie_block, in vcpu_post_run()
4105 mcck_info = &sie_page->mcck_info; in vcpu_post_run()
4110 if (vcpu->arch.sie_block->icptcode > 0) { in vcpu_post_run()
4113 if (rc != -EOPNOTSUPP) in vcpu_post_run()
4115 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC; in vcpu_post_run()
4116 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; in vcpu_post_run()
4117 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; in vcpu_post_run()
4118 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; in vcpu_post_run()
4119 return -EREMOTE; in vcpu_post_run()
4120 } else if (exit_reason != -EFAULT) { in vcpu_post_run()
4121 vcpu->stat.exit_null++; in vcpu_post_run()
4123 } else if (kvm_is_ucontrol(vcpu->kvm)) { in vcpu_post_run()
4124 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL; in vcpu_post_run()
4125 vcpu->run->s390_ucontrol.trans_exc_code = in vcpu_post_run()
4126 current->thread.gmap_addr; in vcpu_post_run()
4127 vcpu->run->s390_ucontrol.pgm_code = 0x10; in vcpu_post_run()
4128 return -EREMOTE; in vcpu_post_run()
4129 } else if (current->thread.gmap_pfault) { in vcpu_post_run()
4131 current->thread.gmap_pfault = 0; in vcpu_post_run()
4134 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1); in vcpu_post_run()
4143 struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block; in __vcpu_run()
4146 * We try to hold kvm->srcu during most of vcpu_run (except when run- in __vcpu_run()
4149 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in __vcpu_run()
4156 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in __vcpu_run()
4166 memcpy(sie_page->pv_grregs, in __vcpu_run()
4167 vcpu->run->s.regs.gprs, in __vcpu_run()
4168 sizeof(sie_page->pv_grregs)); in __vcpu_run()
4170 exit_reason = sie64a(vcpu->arch.sie_block, in __vcpu_run()
4171 vcpu->run->s.regs.gprs); in __vcpu_run()
4173 memcpy(vcpu->run->s.regs.gprs, in __vcpu_run()
4174 sie_page->pv_grregs, in __vcpu_run()
4175 sizeof(sie_page->pv_grregs)); in __vcpu_run()
4178 * that leave the guest state in an "in-between" state in __vcpu_run()
4182 if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR || in __vcpu_run()
4183 vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) { in __vcpu_run()
4184 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK; in __vcpu_run()
4191 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in __vcpu_run()
4196 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in __vcpu_run()
4202 struct kvm_run *kvm_run = vcpu->run; in sync_regs_fmt2()
4206 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb; in sync_regs_fmt2()
4207 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb; in sync_regs_fmt2()
4208 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; in sync_regs_fmt2()
4209 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; in sync_regs_fmt2()
4210 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { in sync_regs_fmt2()
4211 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr; in sync_regs_fmt2()
4212 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp; in sync_regs_fmt2()
4213 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea; in sync_regs_fmt2()
4215 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) { in sync_regs_fmt2()
4216 vcpu->arch.pfault_token = kvm_run->s.regs.pft; in sync_regs_fmt2()
4217 vcpu->arch.pfault_select = kvm_run->s.regs.pfs; in sync_regs_fmt2()
4218 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc; in sync_regs_fmt2()
4219 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in sync_regs_fmt2()
4222 if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) { in sync_regs_fmt2()
4223 vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318; in sync_regs_fmt2()
4224 vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc; in sync_regs_fmt2()
4230 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) && in sync_regs_fmt2()
4231 test_kvm_facility(vcpu->kvm, 64) && in sync_regs_fmt2()
4232 riccb->v && in sync_regs_fmt2()
4233 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) { in sync_regs_fmt2()
4235 vcpu->arch.sie_block->ecb3 |= ECB3_RI; in sync_regs_fmt2()
4238 * If userspace sets the gscb (e.g. after migration) to non-zero, in sync_regs_fmt2()
4241 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) && in sync_regs_fmt2()
4242 test_kvm_facility(vcpu->kvm, 133) && in sync_regs_fmt2()
4243 gscb->gssm && in sync_regs_fmt2()
4244 !vcpu->arch.gs_enabled) { in sync_regs_fmt2()
4246 vcpu->arch.sie_block->ecb |= ECB_GS; in sync_regs_fmt2()
4247 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; in sync_regs_fmt2()
4248 vcpu->arch.gs_enabled = 1; in sync_regs_fmt2()
4250 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) && in sync_regs_fmt2()
4251 test_kvm_facility(vcpu->kvm, 82)) { in sync_regs_fmt2()
4252 vcpu->arch.sie_block->fpf &= ~FPF_BPBC; in sync_regs_fmt2()
4253 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0; in sync_regs_fmt2()
4258 if (current->thread.gs_cb) { in sync_regs_fmt2()
4259 vcpu->arch.host_gscb = current->thread.gs_cb; in sync_regs_fmt2()
4260 save_gs_cb(vcpu->arch.host_gscb); in sync_regs_fmt2()
4262 if (vcpu->arch.gs_enabled) { in sync_regs_fmt2()
4263 current->thread.gs_cb = (struct gs_cb *) in sync_regs_fmt2()
4264 &vcpu->run->s.regs.gscb; in sync_regs_fmt2()
4265 restore_gs_cb(current->thread.gs_cb); in sync_regs_fmt2()
4274 struct kvm_run *kvm_run = vcpu->run; in sync_regs()
4276 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) in sync_regs()
4277 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); in sync_regs()
4278 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) { in sync_regs()
4279 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); in sync_regs()
4283 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { in sync_regs()
4284 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm); in sync_regs()
4285 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc; in sync_regs()
4287 save_access_regs(vcpu->arch.host_acrs); in sync_regs()
4288 restore_access_regs(vcpu->run->s.regs.acrs); in sync_regs()
4291 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc; in sync_regs()
4292 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs; in sync_regs()
4294 current->thread.fpu.regs = vcpu->run->s.regs.vrs; in sync_regs()
4296 current->thread.fpu.regs = vcpu->run->s.regs.fprs; in sync_regs()
4297 current->thread.fpu.fpc = vcpu->run->s.regs.fpc; in sync_regs()
4298 if (test_fp_ctl(current->thread.fpu.fpc)) in sync_regs()
4300 current->thread.fpu.fpc = 0; in sync_regs()
4315 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC; in sync_regs()
4316 vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask & in sync_regs()
4320 kvm_run->kvm_dirty_regs = 0; in sync_regs()
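
/*
 * What sync_regs() consumes: userspace marks fields of the shared kvm_run
 * sync area dirty before KVM_RUN. A minimal sketch (assuming an s390 build
 * and "run" mapped as in the create example above):
 */
static void set_guest_prefix(struct kvm_run *run, __u32 prefix)
{
	run->s.regs.prefix = prefix;
	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;	/* consumed by sync_regs() */
}
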
4325 struct kvm_run *kvm_run = vcpu->run; in store_regs_fmt2()
4327 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr; in store_regs_fmt2()
4328 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp; in store_regs_fmt2()
4329 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; in store_regs_fmt2()
4330 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC; in store_regs_fmt2()
4331 kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val; in store_regs_fmt2()
4335 if (vcpu->arch.gs_enabled) in store_regs_fmt2()
4336 save_gs_cb(current->thread.gs_cb); in store_regs_fmt2()
4337 current->thread.gs_cb = vcpu->arch.host_gscb; in store_regs_fmt2()
4338 restore_gs_cb(vcpu->arch.host_gscb); in store_regs_fmt2()
4339 if (!vcpu->arch.host_gscb) in store_regs_fmt2()
4341 vcpu->arch.host_gscb = NULL; in store_regs_fmt2()
4349 struct kvm_run *kvm_run = vcpu->run; in store_regs()
4351 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; in store_regs()
4352 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; in store_regs()
4353 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu); in store_regs()
4354 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); in store_regs()
4355 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu); in store_regs()
4356 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc; in store_regs()
4357 kvm_run->s.regs.pft = vcpu->arch.pfault_token; in store_regs()
4358 kvm_run->s.regs.pfs = vcpu->arch.pfault_select; in store_regs()
4359 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; in store_regs()
4360 save_access_regs(vcpu->run->s.regs.acrs); in store_regs()
4361 restore_access_regs(vcpu->arch.host_acrs); in store_regs()
4364 vcpu->run->s.regs.fpc = current->thread.fpu.fpc; in store_regs()
4366 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc; in store_regs()
4367 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs; in store_regs()
4374 struct kvm_run *kvm_run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
4377 if (kvm_run->immediate_exit) in kvm_arch_vcpu_ioctl_run()
4378 return -EINTR; in kvm_arch_vcpu_ioctl_run()
4380 if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS || in kvm_arch_vcpu_ioctl_run()
4381 kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS) in kvm_arch_vcpu_ioctl_run()
4382 return -EINVAL; in kvm_arch_vcpu_ioctl_run()
4398 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { in kvm_arch_vcpu_ioctl_run()
4402 vcpu->vcpu_id); in kvm_arch_vcpu_ioctl_run()
4403 rc = -EINVAL; in kvm_arch_vcpu_ioctl_run()
4414 kvm_run->exit_reason = KVM_EXIT_INTR; in kvm_arch_vcpu_ioctl_run()
4415 rc = -EINTR; in kvm_arch_vcpu_ioctl_run()
4423 if (rc == -EREMOTE) { in kvm_arch_vcpu_ioctl_run()
4433 vcpu->stat.exit_userspace++; in kvm_arch_vcpu_ioctl_run()
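
/*
 * A hedged userspace run loop matching the -EREMOTE/KVM_EXIT_S390_SIEIC
 * paths above. handle_sieic() is an assumed application callback; the
 * s390_sieic fields are uapi.
 */
extern int handle_sieic(__u8 icptcode, __u16 ipa, __u32 ipb);

static int run_once(int vcpu_fd, struct kvm_run *run)
{
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
		return -1;			/* e.g. EINTR from a signal */

	switch (run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:	/* intercept the kernel left to us */
		return handle_sieic(run->s390_sieic.icptcode,
				    run->s390_sieic.ipa,
				    run->s390_sieic.ipb);
	case KVM_EXIT_INTR:
		return 0;			/* interrupted, just re-run */
	default:
		return -1;
	}
}
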
4442 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
4443 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
4456 return -EFAULT; in kvm_s390_store_status_unloaded()
4460 return -EFAULT; in kvm_s390_store_status_unloaded()
4463 gpa -= __LC_FPREGS_SAVE_AREA; in kvm_s390_store_status_unloaded()
4467 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs); in kvm_s390_store_status_unloaded()
4472 vcpu->run->s.regs.fprs, 128); in kvm_s390_store_status_unloaded()
4475 vcpu->run->s.regs.gprs, 128); in kvm_s390_store_status_unloaded()
4477 &vcpu->arch.sie_block->gpsw, 16); in kvm_s390_store_status_unloaded()
4481 &vcpu->run->s.regs.fpc, 4); in kvm_s390_store_status_unloaded()
4483 &vcpu->arch.sie_block->todpr, 4); in kvm_s390_store_status_unloaded()
4487 clkcomp = vcpu->arch.sie_block->ckc >> 8; in kvm_s390_store_status_unloaded()
4491 &vcpu->run->s.regs.acrs, 64); in kvm_s390_store_status_unloaded()
4493 &vcpu->arch.sie_block->gcr, 128); in kvm_s390_store_status_unloaded()
4494 return rc ? -EFAULT : 0; in kvm_s390_store_status_unloaded()
4505 vcpu->run->s.regs.fpc = current->thread.fpu.fpc; in kvm_s390_vcpu_store_status()
4506 save_access_regs(vcpu->run->s.regs.acrs); in kvm_s390_vcpu_store_status()
4542 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); in kvm_s390_vcpu_start()
4544 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
4545 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_start()
4551 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
4557 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) in kvm_s390_vcpu_start()
4562 /* we're the only active VCPU -> speed it up */ in kvm_s390_vcpu_start()
4570 __disable_ibs_on_all_vcpus(vcpu->kvm); in kvm_s390_vcpu_start()
4580 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK; in kvm_s390_vcpu_start()
4586 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
4598 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); in kvm_s390_vcpu_stop()
4600 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
4601 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_stop()
4607 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
4624 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) { in kvm_s390_vcpu_stop()
4626 started_vcpu = vcpu->kvm->vcpus[i]; in kvm_s390_vcpu_stop()
4638 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
4647 if (cap->flags) in kvm_vcpu_ioctl_enable_cap()
4648 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
4650 switch (cap->cap) { in kvm_vcpu_ioctl_enable_cap()
4652 if (!vcpu->kvm->arch.css_support) { in kvm_vcpu_ioctl_enable_cap()
4653 vcpu->kvm->arch.css_support = 1; in kvm_vcpu_ioctl_enable_cap()
4654 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support"); in kvm_vcpu_ioctl_enable_cap()
4655 trace_kvm_s390_enable_css(vcpu->kvm); in kvm_vcpu_ioctl_enable_cap()
4660 r = -EINVAL; in kvm_vcpu_ioctl_enable_cap()
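
/*
 * Userspace side of the per-vcpu capability handler above: enabling CSS
 * support via KVM_ENABLE_CAP on the vcpu fd (a sketch; no flags or args
 * are needed for this capability).
 */
static int enable_css(int vcpu_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_S390_CSS_SUPPORT,
	};

	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}
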
4669 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_guest_sida_op()
4672 if (mop->flags || !mop->size) in kvm_s390_guest_sida_op()
4673 return -EINVAL; in kvm_s390_guest_sida_op()
4674 if (mop->size + mop->sida_offset < mop->size) in kvm_s390_guest_sida_op()
4675 return -EINVAL; in kvm_s390_guest_sida_op()
4676 if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block)) in kvm_s390_guest_sida_op()
4677 return -E2BIG; in kvm_s390_guest_sida_op()
4679 return -EINVAL; in kvm_s390_guest_sida_op()
4681 switch (mop->op) { in kvm_s390_guest_sida_op()
4683 if (copy_to_user(uaddr, (void *)(sida_origin(vcpu->arch.sie_block) + in kvm_s390_guest_sida_op()
4684 mop->sida_offset), mop->size)) in kvm_s390_guest_sida_op()
4685 r = -EFAULT; in kvm_s390_guest_sida_op()
4689 if (copy_from_user((void *)(sida_origin(vcpu->arch.sie_block) + in kvm_s390_guest_sida_op()
4690 mop->sida_offset), uaddr, mop->size)) in kvm_s390_guest_sida_op()
4691 r = -EFAULT; in kvm_s390_guest_sida_op()
4699 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_guest_mem_op()
4705 if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size) in kvm_s390_guest_mem_op()
4706 return -EINVAL; in kvm_s390_guest_mem_op()
4708 if (mop->size > MEM_OP_MAX_SIZE) in kvm_s390_guest_mem_op()
4709 return -E2BIG; in kvm_s390_guest_mem_op()
4712 return -EINVAL; in kvm_s390_guest_mem_op()
4714 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) { in kvm_s390_guest_mem_op()
4715 tmpbuf = vmalloc(mop->size); in kvm_s390_guest_mem_op()
4717 return -ENOMEM; in kvm_s390_guest_mem_op()
4720 switch (mop->op) { in kvm_s390_guest_mem_op()
4722 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { in kvm_s390_guest_mem_op()
4723 r = check_gva_range(vcpu, mop->gaddr, mop->ar, in kvm_s390_guest_mem_op()
4724 mop->size, GACC_FETCH); in kvm_s390_guest_mem_op()
4727 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); in kvm_s390_guest_mem_op()
4729 if (copy_to_user(uaddr, tmpbuf, mop->size)) in kvm_s390_guest_mem_op()
4730 r = -EFAULT; in kvm_s390_guest_mem_op()
4734 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { in kvm_s390_guest_mem_op()
4735 r = check_gva_range(vcpu, mop->gaddr, mop->ar, in kvm_s390_guest_mem_op()
4736 mop->size, GACC_STORE); in kvm_s390_guest_mem_op()
4739 if (copy_from_user(tmpbuf, uaddr, mop->size)) { in kvm_s390_guest_mem_op()
4740 r = -EFAULT; in kvm_s390_guest_mem_op()
4743 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); in kvm_s390_guest_mem_op()
4747 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0) in kvm_s390_guest_mem_op()
4748 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); in kvm_s390_guest_mem_op()
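
/*
 * Userspace side of kvm_s390_guest_mem_op(): a sketch reading guest memory
 * through the vcpu's logical address space. struct kvm_s390_mem_op is
 * uapi; the buffer handling here is illustrative.
 */
static int read_guest_bytes(int vcpu_fd, __u64 gaddr, void *buf, __u32 len)
{
	struct kvm_s390_mem_op op = {
		.gaddr = gaddr,
		.size  = len,		/* checked against MEM_OP_MAX_SIZE */
		.op    = KVM_S390_MEMOP_LOGICAL_READ,
		.buf   = (__u64)(unsigned long)buf,
		.ar    = 0,		/* access register 0 */
	};

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}
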
4759 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_s390_guest_memsida_op()
4761 switch (mop->op) { in kvm_s390_guest_memsida_op()
4768 /* we are locked against sida going away by the vcpu->mutex */ in kvm_s390_guest_memsida_op()
4772 r = -EINVAL; in kvm_s390_guest_memsida_op()
4775 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvm_s390_guest_memsida_op()
4782 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl()
4790 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
4798 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
4800 return -EINVAL; in kvm_arch_vcpu_async_ioctl()
4804 return -ENOIOCTLCMD; in kvm_arch_vcpu_async_ioctl()
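
/*
 * The async ioctl above injects interrupts without taking the vcpu mutex.
 * A sketch using the older KVM_S390_INTERRUPT interface; whether parm
 * carries the signalling CPU address for this type is an assumption here,
 * so treat the values as illustrative.
 */
static int inject_emergency(int vcpu_fd, __u16 src_cpu_addr)
{
	struct kvm_s390_interrupt irq = {
		.type = KVM_S390_INT_EMERGENCY,
		.parm = src_cpu_addr,	/* assumed: emergency signal source */
	};

	return ioctl(vcpu_fd, KVM_S390_INTERRUPT, &irq);
}
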
4810 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
4820 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
4822 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
4827 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4867 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4870 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4884 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4888 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
4889 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4893 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, in kvm_arch_vcpu_ioctl()
4901 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4905 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
4906 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4910 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, in kvm_arch_vcpu_ioctl()
4916 r = gmap_fault(vcpu->arch.gmap, arg, 0); in kvm_arch_vcpu_ioctl()
4922 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4934 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4940 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4946 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4958 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4962 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4972 r = -ENOTTY; in kvm_arch_vcpu_ioctl()
4982 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET) in kvm_arch_vcpu_fault()
4983 && (kvm_is_ucontrol(vcpu->kvm))) { in kvm_arch_vcpu_fault()
4984 vmf->page = virt_to_page(vcpu->arch.sie_block); in kvm_arch_vcpu_fault()
4985 get_page(vmf->page); in kvm_arch_vcpu_fault()
5003 if (mem->userspace_addr & 0xffffful) in kvm_arch_prepare_memory_region()
5004 return -EINVAL; in kvm_arch_prepare_memory_region()
5006 if (mem->memory_size & 0xffffful) in kvm_arch_prepare_memory_region()
5007 return -EINVAL; in kvm_arch_prepare_memory_region()
5009 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit) in kvm_arch_prepare_memory_region()
5010 return -EINVAL; in kvm_arch_prepare_memory_region()
5014 return -EINVAL; in kvm_arch_prepare_memory_region()
5028 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
5029 old->npages * PAGE_SIZE); in kvm_arch_commit_memory_region()
5032 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
5033 old->npages * PAGE_SIZE); in kvm_arch_commit_memory_region()
5038 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, in kvm_arch_commit_memory_region()
5039 mem->guest_phys_addr, mem->memory_size); in kvm_arch_commit_memory_region()
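
/*
 * The prepare/commit hooks above enforce 1 MiB alignment (the 0xffffful
 * masks) on the userspace address, guest address and size. A matching
 * KVM_SET_USER_MEMORY_REGION sketch (vm_fd and a 1 MiB-aligned backing
 * mapping are assumed):
 */
static int add_ram_slot(int vm_fd, void *host_mem, __u64 guest_addr, __u64 size)
{
	struct kvm_userspace_memory_region region = {
		.slot		 = 0,
		.guest_phys_addr = guest_addr,		/* 1 MiB aligned */
		.memory_size	 = size,		/* 1 MiB multiple */
		.userspace_addr	 = (__u64)(unsigned long)host_mem,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}
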
5060 vcpu->valid_wakeup = false; in kvm_arch_vcpu_block_finish()
5069 return -ENODEV; in kvm_s390_init()
5074 return -EINVAL; in kvm_s390_init()