Lines Matching +full:ipa +full:- +full:clock +full:- +full:query

1 // SPDX-License-Identifier: GPL-2.0
13 #define KMSG_COMPONENT "kvm-s390"
36 #include <asm/asm-offsets.h>
49 #include "kvm-s390.h"
55 #include "trace-s390.h"
221 * the feature is opt-in anyway
236 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
257 /* available subfunctions indicated via query / "test bit" */
277 * -delta to the epoch. in kvm_clock_sync_scb()
279 delta = -delta; in kvm_clock_sync_scb()
281 /* sign-extension - we're adding to signed values below */ in kvm_clock_sync_scb()
283 delta_idx = -1; in kvm_clock_sync_scb()
285 scb->epoch += delta; in kvm_clock_sync_scb()
286 if (scb->ecd & ECD_MEF) { in kvm_clock_sync_scb()
287 scb->epdx += delta_idx; in kvm_clock_sync_scb()
288 if (scb->epoch < delta) in kvm_clock_sync_scb()
289 scb->epdx += 1; in kvm_clock_sync_scb()
309 kvm_clock_sync_scb(vcpu->arch.sie_block, *delta); in kvm_clock_sync()
311 kvm->arch.epoch = vcpu->arch.sie_block->epoch; in kvm_clock_sync()
312 kvm->arch.epdx = vcpu->arch.sie_block->epdx; in kvm_clock_sync()
314 if (vcpu->arch.cputm_enabled) in kvm_clock_sync()
315 vcpu->arch.cputm_start += *delta; in kvm_clock_sync()
316 if (vcpu->arch.vsie_block) in kvm_clock_sync()
317 kvm_clock_sync_scb(vcpu->arch.vsie_block, in kvm_clock_sync()
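The epoch/epdx handling above is a 128-bit addition carried out in two 64-bit halves: the delta is sign-extended into delta_idx, added to both words, and an unsigned wrap of the low word propagates a carry into the high word. A minimal standalone sketch of that add-with-carry step (illustrative only, not the kernel code; the helper name is made up):

#include <linux/types.h>

/* Sketch: add a sign-extended 64-bit delta to a 128-bit value kept as hi:lo. */
static void epoch128_add(u64 *hi, u64 *lo, u64 delta)
{
	u64 delta_hi = ((s64)delta < 0) ? -1ULL : 0;	/* sign extension, like delta_idx */

	*lo += delta;
	*hi += delta_hi;
	if (*lo < delta)	/* low word wrapped around -> carry into the high word */
		*hi += 1;
}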
350 static __always_inline void __insn32_query(unsigned int opcode, u8 *query) in __insn32_query() argument
354 " lgr 1,%[query]\n" in __insn32_query()
358 : [query] "d" ((unsigned long)query), [opc] "i" (opcode) in __insn32_query()
374 if (test_facility(28)) /* TOD-clock steering */ in kvm_s390_cpu_feat_init()
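__insn32_query() above runs the query function of the given opcode with general register 1 pointing at the result buffer; each set bit in that buffer advertises one available subfunction, which is what the "test bit" comment refers to. A hedged sketch of the conventional MSB-first bit test on such a query block (the helper name is illustrative, not a kernel API):

#include <linux/types.h>

/* Sketch: check whether function code 'fc' is flagged in a query result. */
static inline int query_has_subfunc(const u8 *query, unsigned int fc)
{
	/* s390 numbers bits from the most significant bit of each byte */
	return (query[fc >> 3] & (0x80 >> (fc & 7))) != 0;
}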
470 int rc = -ENOMEM; in __kvm_s390_init()
472 kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long)); in __kvm_s390_init()
474 return -ENOMEM; in __kvm_s390_init()
476 kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long)); in __kvm_s390_init()
546 return -EINVAL; in kvm_arch_dev_ioctl()
675 struct gmap *gmap = kvm->arch.gmap; in kvm_arch_sync_dirty_log()
679 cur_gfn = memslot->base_gfn; in kvm_arch_sync_dirty_log()
680 last_gfn = memslot->base_gfn + memslot->npages; in kvm_arch_sync_dirty_log()
715 return -EINVAL; in kvm_vm_ioctl_get_dirty_log()
717 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
719 r = -EINVAL; in kvm_vm_ioctl_get_dirty_log()
720 if (log->slot >= KVM_USER_MEM_SLOTS) in kvm_vm_ioctl_get_dirty_log()
730 memset(memslot->dirty_bitmap, 0, n); in kvm_vm_ioctl_get_dirty_log()
734 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
752 if (cap->flags) in kvm_vm_ioctl_enable_cap()
753 return -EINVAL; in kvm_vm_ioctl_enable_cap()
755 switch (cap->cap) { in kvm_vm_ioctl_enable_cap()
758 kvm->arch.use_irqchip = 1; in kvm_vm_ioctl_enable_cap()
763 kvm->arch.user_sigp = 1; in kvm_vm_ioctl_enable_cap()
767 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
768 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
769 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
771 set_kvm_facility(kvm->arch.model.fac_mask, 129); in kvm_vm_ioctl_enable_cap()
772 set_kvm_facility(kvm->arch.model.fac_list, 129); in kvm_vm_ioctl_enable_cap()
774 set_kvm_facility(kvm->arch.model.fac_mask, 134); in kvm_vm_ioctl_enable_cap()
775 set_kvm_facility(kvm->arch.model.fac_list, 134); in kvm_vm_ioctl_enable_cap()
778 set_kvm_facility(kvm->arch.model.fac_mask, 135); in kvm_vm_ioctl_enable_cap()
779 set_kvm_facility(kvm->arch.model.fac_list, 135); in kvm_vm_ioctl_enable_cap()
782 set_kvm_facility(kvm->arch.model.fac_mask, 148); in kvm_vm_ioctl_enable_cap()
783 set_kvm_facility(kvm->arch.model.fac_list, 148); in kvm_vm_ioctl_enable_cap()
786 set_kvm_facility(kvm->arch.model.fac_mask, 152); in kvm_vm_ioctl_enable_cap()
787 set_kvm_facility(kvm->arch.model.fac_list, 152); in kvm_vm_ioctl_enable_cap()
790 set_kvm_facility(kvm->arch.model.fac_mask, 192); in kvm_vm_ioctl_enable_cap()
791 set_kvm_facility(kvm->arch.model.fac_list, 192); in kvm_vm_ioctl_enable_cap()
795 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
796 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
801 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
802 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
803 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
804 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
806 set_kvm_facility(kvm->arch.model.fac_mask, 64); in kvm_vm_ioctl_enable_cap()
807 set_kvm_facility(kvm->arch.model.fac_list, 64); in kvm_vm_ioctl_enable_cap()
810 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
815 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
816 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
817 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
819 set_kvm_facility(kvm->arch.model.fac_mask, 72); in kvm_vm_ioctl_enable_cap()
820 set_kvm_facility(kvm->arch.model.fac_list, 72); in kvm_vm_ioctl_enable_cap()
823 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
828 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
829 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
830 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
831 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
833 set_kvm_facility(kvm->arch.model.fac_mask, 133); in kvm_vm_ioctl_enable_cap()
834 set_kvm_facility(kvm->arch.model.fac_list, 133); in kvm_vm_ioctl_enable_cap()
837 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
842 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
843 if (kvm->created_vcpus) in kvm_vm_ioctl_enable_cap()
844 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
845 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_enable_cap()
846 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
849 mmap_write_lock(kvm->mm); in kvm_vm_ioctl_enable_cap()
850 kvm->mm->context.allow_gmap_hpage_1m = 1; in kvm_vm_ioctl_enable_cap()
851 mmap_write_unlock(kvm->mm); in kvm_vm_ioctl_enable_cap()
857 kvm->arch.use_skf = 0; in kvm_vm_ioctl_enable_cap()
858 kvm->arch.use_pfmfi = 0; in kvm_vm_ioctl_enable_cap()
860 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
866 kvm->arch.user_stsi = 1; in kvm_vm_ioctl_enable_cap()
871 kvm->arch.user_instr0 = 1; in kvm_vm_ioctl_enable_cap()
876 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
877 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
878 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
879 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
881 set_kvm_facility(kvm->arch.model.fac_mask, 11); in kvm_vm_ioctl_enable_cap()
882 set_kvm_facility(kvm->arch.model.fac_list, 11); in kvm_vm_ioctl_enable_cap()
885 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
890 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
900 switch (attr->attr) { in kvm_s390_get_mem_control()
903 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes", in kvm_s390_get_mem_control()
904 kvm->arch.mem_limit); in kvm_s390_get_mem_control()
905 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr)) in kvm_s390_get_mem_control()
906 ret = -EFAULT; in kvm_s390_get_mem_control()
909 ret = -ENXIO; in kvm_s390_get_mem_control()
919 switch (attr->attr) { in kvm_s390_set_mem_control()
921 ret = -ENXIO; in kvm_s390_set_mem_control()
926 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
927 if (kvm->created_vcpus) in kvm_s390_set_mem_control()
928 ret = -EBUSY; in kvm_s390_set_mem_control()
929 else if (kvm->mm->context.allow_gmap_hpage_1m) in kvm_s390_set_mem_control()
930 ret = -EINVAL; in kvm_s390_set_mem_control()
932 kvm->arch.use_cmma = 1; in kvm_s390_set_mem_control()
934 kvm->arch.use_pfmfi = 0; in kvm_s390_set_mem_control()
937 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
940 ret = -ENXIO; in kvm_s390_set_mem_control()
943 ret = -EINVAL; in kvm_s390_set_mem_control()
944 if (!kvm->arch.use_cmma) in kvm_s390_set_mem_control()
948 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
949 idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_mem_control()
950 s390_reset_cmma(kvm->arch.gmap->mm); in kvm_s390_set_mem_control()
951 srcu_read_unlock(&kvm->srcu, idx); in kvm_s390_set_mem_control()
952 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
959 return -EINVAL; in kvm_s390_set_mem_control()
961 if (get_user(new_limit, (u64 __user *)attr->addr)) in kvm_s390_set_mem_control()
962 return -EFAULT; in kvm_s390_set_mem_control()
964 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT && in kvm_s390_set_mem_control()
965 new_limit > kvm->arch.mem_limit) in kvm_s390_set_mem_control()
966 return -E2BIG; in kvm_s390_set_mem_control()
969 return -EINVAL; in kvm_s390_set_mem_control()
973 new_limit -= 1; in kvm_s390_set_mem_control()
975 ret = -EBUSY; in kvm_s390_set_mem_control()
976 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
977 if (!kvm->created_vcpus) { in kvm_s390_set_mem_control()
979 struct gmap *new = gmap_create(current->mm, new_limit); in kvm_s390_set_mem_control()
982 ret = -ENOMEM; in kvm_s390_set_mem_control()
984 gmap_remove(kvm->arch.gmap); in kvm_s390_set_mem_control()
985 new->private = kvm; in kvm_s390_set_mem_control()
986 kvm->arch.gmap = new; in kvm_s390_set_mem_control()
990 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
993 (void *) kvm->arch.gmap->asce); in kvm_s390_set_mem_control()
997 ret = -ENXIO; in kvm_s390_set_mem_control()
1023 mutex_lock(&kvm->lock); in kvm_s390_vm_set_crypto()
1024 switch (attr->attr) { in kvm_s390_vm_set_crypto()
1027 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1028 return -EINVAL; in kvm_s390_vm_set_crypto()
1031 kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_vm_set_crypto()
1032 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1033 kvm->arch.crypto.aes_kw = 1; in kvm_s390_vm_set_crypto()
1038 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1039 return -EINVAL; in kvm_s390_vm_set_crypto()
1042 kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_vm_set_crypto()
1043 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1044 kvm->arch.crypto.dea_kw = 1; in kvm_s390_vm_set_crypto()
1049 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1050 return -EINVAL; in kvm_s390_vm_set_crypto()
1052 kvm->arch.crypto.aes_kw = 0; in kvm_s390_vm_set_crypto()
1053 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
1054 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1059 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1060 return -EINVAL; in kvm_s390_vm_set_crypto()
1062 kvm->arch.crypto.dea_kw = 0; in kvm_s390_vm_set_crypto()
1063 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
1064 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1069 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1070 return -EOPNOTSUPP; in kvm_s390_vm_set_crypto()
1072 kvm->arch.crypto.apie = 1; in kvm_s390_vm_set_crypto()
1076 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1077 return -EOPNOTSUPP; in kvm_s390_vm_set_crypto()
1079 kvm->arch.crypto.apie = 0; in kvm_s390_vm_set_crypto()
1082 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1083 return -ENXIO; in kvm_s390_vm_set_crypto()
1087 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1094 if (!vcpu->kvm->arch.use_zpci_interp) in kvm_s390_vcpu_pci_setup()
1097 vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI; in kvm_s390_vcpu_pci_setup()
1098 vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI; in kvm_s390_vcpu_pci_setup()
1106 lockdep_assert_held(&kvm->lock); in kvm_s390_vcpu_pci_enable_interp()
1115 kvm->arch.use_zpci_interp = 1; in kvm_s390_vcpu_pci_enable_interp()
1137 * Must be called with kvm->srcu held to avoid races on memslots, and with
1138 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
1148 if (kvm->arch.migration_mode) in kvm_s390_vm_start_migration()
1152 return -EINVAL; in kvm_s390_vm_start_migration()
1154 if (!kvm->arch.use_cmma) { in kvm_s390_vm_start_migration()
1155 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1160 if (!ms->dirty_bitmap) in kvm_s390_vm_start_migration()
1161 return -EINVAL; in kvm_s390_vm_start_migration()
1169 ram_pages += ms->npages; in kvm_s390_vm_start_migration()
1171 atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages); in kvm_s390_vm_start_migration()
1172 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1178 * Must be called with kvm->slots_lock to avoid races with ourselves and
1184 if (!kvm->arch.migration_mode) in kvm_s390_vm_stop_migration()
1186 kvm->arch.migration_mode = 0; in kvm_s390_vm_stop_migration()
1187 if (kvm->arch.use_cmma) in kvm_s390_vm_stop_migration()
1195 int res = -ENXIO; in kvm_s390_vm_set_migration()
1197 mutex_lock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
1198 switch (attr->attr) { in kvm_s390_vm_set_migration()
1208 mutex_unlock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
1216 u64 mig = kvm->arch.migration_mode; in kvm_s390_vm_get_migration()
1218 if (attr->attr != KVM_S390_VM_MIGRATION_STATUS) in kvm_s390_vm_get_migration()
1219 return -ENXIO; in kvm_s390_vm_get_migration()
1221 if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig))) in kvm_s390_vm_get_migration()
1222 return -EFAULT; in kvm_s390_vm_get_migration()
1232 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod))) in kvm_s390_set_tod_ext()
1233 return -EFAULT; in kvm_s390_set_tod_ext()
1236 return -EINVAL; in kvm_s390_set_tod_ext()
1249 if (copy_from_user(&gtod_high, (void __user *)attr->addr, in kvm_s390_set_tod_high()
1251 return -EFAULT; in kvm_s390_set_tod_high()
1254 return -EINVAL; in kvm_s390_set_tod_high()
1264 if (copy_from_user(&gtod.tod, (void __user *)attr->addr, in kvm_s390_set_tod_low()
1266 return -EFAULT; in kvm_s390_set_tod_low()
1277 if (attr->flags) in kvm_s390_set_tod()
1278 return -EINVAL; in kvm_s390_set_tod()
1280 mutex_lock(&kvm->lock); in kvm_s390_set_tod()
1286 ret = -EOPNOTSUPP; in kvm_s390_set_tod()
1290 switch (attr->attr) { in kvm_s390_set_tod()
1301 ret = -ENXIO; in kvm_s390_set_tod()
1306 mutex_unlock(&kvm->lock); in kvm_s390_set_tod()
1319 gtod->tod = clk.tod + kvm->arch.epoch; in kvm_s390_get_tod_clock()
1320 gtod->epoch_idx = 0; in kvm_s390_get_tod_clock()
1322 gtod->epoch_idx = clk.ei + kvm->arch.epdx; in kvm_s390_get_tod_clock()
1323 if (gtod->tod < clk.tod) in kvm_s390_get_tod_clock()
1324 gtod->epoch_idx += 1; in kvm_s390_get_tod_clock()
1336 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) in kvm_s390_get_tod_ext()
1337 return -EFAULT; in kvm_s390_get_tod_ext()
1339 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx", in kvm_s390_get_tod_ext()
1348 if (copy_to_user((void __user *)attr->addr, &gtod_high, in kvm_s390_get_tod_high()
1350 return -EFAULT; in kvm_s390_get_tod_high()
1351 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high); in kvm_s390_get_tod_high()
1361 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) in kvm_s390_get_tod_low()
1362 return -EFAULT; in kvm_s390_get_tod_low()
1363 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod); in kvm_s390_get_tod_low()
1372 if (attr->flags) in kvm_s390_get_tod()
1373 return -EINVAL; in kvm_s390_get_tod()
1375 switch (attr->attr) { in kvm_s390_get_tod()
1386 ret = -ENXIO; in kvm_s390_get_tod()
1398 mutex_lock(&kvm->lock); in kvm_s390_set_processor()
1399 if (kvm->created_vcpus) { in kvm_s390_set_processor()
1400 ret = -EBUSY; in kvm_s390_set_processor()
1405 ret = -ENOMEM; in kvm_s390_set_processor()
1408 if (!copy_from_user(proc, (void __user *)attr->addr, in kvm_s390_set_processor()
1410 kvm->arch.model.cpuid = proc->cpuid; in kvm_s390_set_processor()
1413 if (lowest_ibc && proc->ibc) { in kvm_s390_set_processor()
1414 if (proc->ibc > unblocked_ibc) in kvm_s390_set_processor()
1415 kvm->arch.model.ibc = unblocked_ibc; in kvm_s390_set_processor()
1416 else if (proc->ibc < lowest_ibc) in kvm_s390_set_processor()
1417 kvm->arch.model.ibc = lowest_ibc; in kvm_s390_set_processor()
1419 kvm->arch.model.ibc = proc->ibc; in kvm_s390_set_processor()
1421 memcpy(kvm->arch.model.fac_list, proc->fac_list, in kvm_s390_set_processor()
1424 kvm->arch.model.ibc, in kvm_s390_set_processor()
1425 kvm->arch.model.cpuid); in kvm_s390_set_processor()
1427 kvm->arch.model.fac_list[0], in kvm_s390_set_processor()
1428 kvm->arch.model.fac_list[1], in kvm_s390_set_processor()
1429 kvm->arch.model.fac_list[2]); in kvm_s390_set_processor()
1431 ret = -EFAULT; in kvm_s390_set_processor()
1434 mutex_unlock(&kvm->lock); in kvm_s390_set_processor()
1443 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data))) in kvm_s390_set_processor_feat()
1444 return -EFAULT; in kvm_s390_set_processor_feat()
1448 return -EINVAL; in kvm_s390_set_processor_feat()
1450 mutex_lock(&kvm->lock); in kvm_s390_set_processor_feat()
1451 if (kvm->created_vcpus) { in kvm_s390_set_processor_feat()
1452 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1453 return -EBUSY; in kvm_s390_set_processor_feat()
1455 bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS); in kvm_s390_set_processor_feat()
1456 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1467 mutex_lock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1468 if (kvm->created_vcpus) { in kvm_s390_set_processor_subfunc()
1469 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1470 return -EBUSY; in kvm_s390_set_processor_subfunc()
1473 if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr, in kvm_s390_set_processor_subfunc()
1475 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1476 return -EFAULT; in kvm_s390_set_processor_subfunc()
1478 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1481 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_set_processor_subfunc()
1482 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_set_processor_subfunc()
1483 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_set_processor_subfunc()
1484 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_set_processor_subfunc()
1486 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_set_processor_subfunc()
1487 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_set_processor_subfunc()
1489 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_set_processor_subfunc()
1490 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_set_processor_subfunc()
1492 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_set_processor_subfunc()
1493 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_set_processor_subfunc()
1495 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_set_processor_subfunc()
1496 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_set_processor_subfunc()
1498 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_set_processor_subfunc()
1499 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_set_processor_subfunc()
1501 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_set_processor_subfunc()
1502 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_set_processor_subfunc()
1504 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_set_processor_subfunc()
1505 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_set_processor_subfunc()
1507 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_set_processor_subfunc()
1508 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_set_processor_subfunc()
1510 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_set_processor_subfunc()
1511 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_set_processor_subfunc()
1513 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_set_processor_subfunc()
1514 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_set_processor_subfunc()
1516 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_set_processor_subfunc()
1517 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_set_processor_subfunc()
1519 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_set_processor_subfunc()
1520 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_set_processor_subfunc()
1522 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_set_processor_subfunc()
1523 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_set_processor_subfunc()
1525 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_set_processor_subfunc()
1526 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_set_processor_subfunc()
1528 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_set_processor_subfunc()
1529 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_set_processor_subfunc()
1530 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_set_processor_subfunc()
1531 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_set_processor_subfunc()
1533 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_set_processor_subfunc()
1534 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_set_processor_subfunc()
1535 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_set_processor_subfunc()
1536 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_set_processor_subfunc()
1552 struct kvm_s390_vm_cpu_uv_feat __user *ptr = (void __user *)attr->addr; in kvm_s390_set_uv_feat()
1556 if (get_user(data, &ptr->feat)) in kvm_s390_set_uv_feat()
1557 return -EFAULT; in kvm_s390_set_uv_feat()
1559 return -EINVAL; in kvm_s390_set_uv_feat()
1561 mutex_lock(&kvm->lock); in kvm_s390_set_uv_feat()
1562 if (kvm->created_vcpus) { in kvm_s390_set_uv_feat()
1563 mutex_unlock(&kvm->lock); in kvm_s390_set_uv_feat()
1564 return -EBUSY; in kvm_s390_set_uv_feat()
1566 kvm->arch.model.uv_feat_guest.feat = data; in kvm_s390_set_uv_feat()
1567 mutex_unlock(&kvm->lock); in kvm_s390_set_uv_feat()
1569 VM_EVENT(kvm, 3, "SET: guest UV-feat: 0x%16.16lx", data); in kvm_s390_set_uv_feat()
1576 int ret = -ENXIO; in kvm_s390_set_cpu_model()
1578 switch (attr->attr) { in kvm_s390_set_cpu_model()
1602 ret = -ENOMEM; in kvm_s390_get_processor()
1605 proc->cpuid = kvm->arch.model.cpuid; in kvm_s390_get_processor()
1606 proc->ibc = kvm->arch.model.ibc; in kvm_s390_get_processor()
1607 memcpy(&proc->fac_list, kvm->arch.model.fac_list, in kvm_s390_get_processor()
1610 kvm->arch.model.ibc, in kvm_s390_get_processor()
1611 kvm->arch.model.cpuid); in kvm_s390_get_processor()
1613 kvm->arch.model.fac_list[0], in kvm_s390_get_processor()
1614 kvm->arch.model.fac_list[1], in kvm_s390_get_processor()
1615 kvm->arch.model.fac_list[2]); in kvm_s390_get_processor()
1616 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc))) in kvm_s390_get_processor()
1617 ret = -EFAULT; in kvm_s390_get_processor()
1630 ret = -ENOMEM; in kvm_s390_get_machine()
1633 get_cpu_id((struct cpuid *) &mach->cpuid); in kvm_s390_get_machine()
1634 mach->ibc = sclp.ibc; in kvm_s390_get_machine()
1635 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask, in kvm_s390_get_machine()
1637 memcpy((unsigned long *)&mach->fac_list, stfle_fac_list, in kvm_s390_get_machine()
1640 kvm->arch.model.ibc, in kvm_s390_get_machine()
1641 kvm->arch.model.cpuid); in kvm_s390_get_machine()
1643 mach->fac_mask[0], in kvm_s390_get_machine()
1644 mach->fac_mask[1], in kvm_s390_get_machine()
1645 mach->fac_mask[2]); in kvm_s390_get_machine()
1647 mach->fac_list[0], in kvm_s390_get_machine()
1648 mach->fac_list[1], in kvm_s390_get_machine()
1649 mach->fac_list[2]); in kvm_s390_get_machine()
1650 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) in kvm_s390_get_machine()
1651 ret = -EFAULT; in kvm_s390_get_machine()
1662 bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS); in kvm_s390_get_processor_feat()
1663 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data))) in kvm_s390_get_processor_feat()
1664 return -EFAULT; in kvm_s390_get_processor_feat()
1678 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data))) in kvm_s390_get_machine_feat()
1679 return -EFAULT; in kvm_s390_get_machine_feat()
1690 if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs, in kvm_s390_get_processor_subfunc()
1692 return -EFAULT; in kvm_s390_get_processor_subfunc()
1695 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_get_processor_subfunc()
1696 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_get_processor_subfunc()
1697 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_get_processor_subfunc()
1698 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_get_processor_subfunc()
1700 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_get_processor_subfunc()
1701 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_get_processor_subfunc()
1703 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_get_processor_subfunc()
1704 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_get_processor_subfunc()
1706 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_get_processor_subfunc()
1707 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_get_processor_subfunc()
1709 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_get_processor_subfunc()
1710 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_get_processor_subfunc()
1712 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_get_processor_subfunc()
1713 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_get_processor_subfunc()
1715 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_get_processor_subfunc()
1716 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_get_processor_subfunc()
1718 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_get_processor_subfunc()
1719 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_get_processor_subfunc()
1721 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_get_processor_subfunc()
1722 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_get_processor_subfunc()
1724 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_get_processor_subfunc()
1725 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_get_processor_subfunc()
1727 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_get_processor_subfunc()
1728 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_get_processor_subfunc()
1730 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_get_processor_subfunc()
1731 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_get_processor_subfunc()
1733 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_get_processor_subfunc()
1734 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_get_processor_subfunc()
1736 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_get_processor_subfunc()
1737 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_get_processor_subfunc()
1739 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_get_processor_subfunc()
1740 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_get_processor_subfunc()
1742 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_get_processor_subfunc()
1743 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_get_processor_subfunc()
1744 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_get_processor_subfunc()
1745 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_get_processor_subfunc()
1747 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_get_processor_subfunc()
1748 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_get_processor_subfunc()
1749 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_get_processor_subfunc()
1750 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_get_processor_subfunc()
1758 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc, in kvm_s390_get_machine_subfunc()
1760 return -EFAULT; in kvm_s390_get_machine_subfunc()
1825 struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr; in kvm_s390_get_processor_uv_feat()
1826 unsigned long feat = kvm->arch.model.uv_feat_guest.feat; in kvm_s390_get_processor_uv_feat()
1828 if (put_user(feat, &dst->feat)) in kvm_s390_get_processor_uv_feat()
1829 return -EFAULT; in kvm_s390_get_processor_uv_feat()
1830 VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat); in kvm_s390_get_processor_uv_feat()
1837 struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr; in kvm_s390_get_machine_uv_feat()
1843 if (put_user(feat, &dst->feat)) in kvm_s390_get_machine_uv_feat()
1844 return -EFAULT; in kvm_s390_get_machine_uv_feat()
1845 VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat); in kvm_s390_get_machine_uv_feat()
1852 int ret = -ENXIO; in kvm_s390_get_cpu_model()
1854 switch (attr->attr) { in kvm_s390_get_cpu_model()
1884 * kvm_s390_update_topology_change_report - update CPU topology change report
1888 * Updates the Multiprocessor Topology-Change-Report bit to signal
1899 read_lock(&kvm->arch.sca_lock); in kvm_s390_update_topology_change_report()
1900 sca = kvm->arch.sca; in kvm_s390_update_topology_change_report()
1902 old = READ_ONCE(sca->utility); in kvm_s390_update_topology_change_report()
1905 } while (cmpxchg(&sca->utility.val, old.val, new.val) != old.val); in kvm_s390_update_topology_change_report()
1906 read_unlock(&kvm->arch.sca_lock); in kvm_s390_update_topology_change_report()
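The loop above is a lock-free read-modify-write: the listing omits the middle of the loop body, but the pattern is to copy the current utility word, update the MTCR bit, and retry the cmpxchg until no concurrent writer has intervened. A hedged reconstruction of that pattern, assuming the SCA utility union type with the .val and .mtcr fields used above (a sketch, not the omitted kernel lines):

/* Sketch: set/clear the MTCR bit in an SCA utility word via a cmpxchg loop. */
static void set_mtcr_sketch(union sca_utility *utility, bool val)
{
	union sca_utility old, new;

	do {
		old = READ_ONCE(*utility);
		new = old;
		new.mtcr = val;
	} while (cmpxchg(&utility->val, old.val, new.val) != old.val);
}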
1913 return -ENXIO; in kvm_s390_set_topo_change_indication()
1915 kvm_s390_update_topology_change_report(kvm, !!attr->attr); in kvm_s390_set_topo_change_indication()
1925 return -ENXIO; in kvm_s390_get_topo_change_indication()
1927 read_lock(&kvm->arch.sca_lock); in kvm_s390_get_topo_change_indication()
1928 topo = ((struct bsca_block *)kvm->arch.sca)->utility.mtcr; in kvm_s390_get_topo_change_indication()
1929 read_unlock(&kvm->arch.sca_lock); in kvm_s390_get_topo_change_indication()
1931 return put_user(topo, (u8 __user *)attr->addr); in kvm_s390_get_topo_change_indication()
1938 switch (attr->group) { in kvm_s390_vm_set_attr()
1958 ret = -ENXIO; in kvm_s390_vm_set_attr()
1969 switch (attr->group) { in kvm_s390_vm_get_attr()
1986 ret = -ENXIO; in kvm_s390_vm_get_attr()
1997 switch (attr->group) { in kvm_s390_vm_has_attr()
1999 switch (attr->attr) { in kvm_s390_vm_has_attr()
2002 ret = sclp.has_cmma ? 0 : -ENXIO; in kvm_s390_vm_has_attr()
2008 ret = -ENXIO; in kvm_s390_vm_has_attr()
2013 switch (attr->attr) { in kvm_s390_vm_has_attr()
2019 ret = -ENXIO; in kvm_s390_vm_has_attr()
2024 switch (attr->attr) { in kvm_s390_vm_has_attr()
2036 ret = -ENXIO; in kvm_s390_vm_has_attr()
2041 switch (attr->attr) { in kvm_s390_vm_has_attr()
2050 ret = ap_instructions_available() ? 0 : -ENXIO; in kvm_s390_vm_has_attr()
2053 ret = -ENXIO; in kvm_s390_vm_has_attr()
2061 ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO; in kvm_s390_vm_has_attr()
2064 ret = -ENXIO; in kvm_s390_vm_has_attr()
2077 if (args->flags != 0) in kvm_s390_get_skeys()
2078 return -EINVAL; in kvm_s390_get_skeys()
2081 if (!mm_uses_skeys(current->mm)) in kvm_s390_get_skeys()
2085 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) in kvm_s390_get_skeys()
2086 return -EINVAL; in kvm_s390_get_skeys()
2088 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT); in kvm_s390_get_skeys()
2090 return -ENOMEM; in kvm_s390_get_skeys()
2092 mmap_read_lock(current->mm); in kvm_s390_get_skeys()
2093 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_skeys()
2094 for (i = 0; i < args->count; i++) { in kvm_s390_get_skeys()
2095 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_get_skeys()
2097 r = -EFAULT; in kvm_s390_get_skeys()
2101 r = get_guest_storage_key(current->mm, hva, &keys[i]); in kvm_s390_get_skeys()
2105 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_skeys()
2106 mmap_read_unlock(current->mm); in kvm_s390_get_skeys()
2109 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys, in kvm_s390_get_skeys()
2110 sizeof(uint8_t) * args->count); in kvm_s390_get_skeys()
2112 r = -EFAULT; in kvm_s390_get_skeys()
2126 if (args->flags != 0) in kvm_s390_set_skeys()
2127 return -EINVAL; in kvm_s390_set_skeys()
2130 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) in kvm_s390_set_skeys()
2131 return -EINVAL; in kvm_s390_set_skeys()
2133 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT); in kvm_s390_set_skeys()
2135 return -ENOMEM; in kvm_s390_set_skeys()
2137 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr, in kvm_s390_set_skeys()
2138 sizeof(uint8_t) * args->count); in kvm_s390_set_skeys()
2140 r = -EFAULT; in kvm_s390_set_skeys()
2150 mmap_read_lock(current->mm); in kvm_s390_set_skeys()
2151 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_skeys()
2152 while (i < args->count) { in kvm_s390_set_skeys()
2154 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_skeys()
2156 r = -EFAULT; in kvm_s390_set_skeys()
2162 r = -EINVAL; in kvm_s390_set_skeys()
2166 r = set_guest_storage_key(current->mm, hva, keys[i], 0); in kvm_s390_set_skeys()
2168 r = fixup_user_fault(current->mm, hva, in kvm_s390_set_skeys()
2176 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_skeys()
2177 mmap_read_unlock(current->mm); in kvm_s390_set_skeys()
2195 unsigned long pgstev, hva, cur_gfn = args->start_gfn; in kvm_s390_peek_cmma()
2197 args->count = 0; in kvm_s390_peek_cmma()
2198 while (args->count < bufsize) { in kvm_s390_peek_cmma()
2205 return args->count ? 0 : -EFAULT; in kvm_s390_peek_cmma()
2206 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_peek_cmma()
2208 res[args->count++] = (pgstev >> 24) & 0x43; in kvm_s390_peek_cmma()
2225 unsigned long ofs = cur_gfn - ms->base_gfn; in kvm_s390_next_dirty_cmma()
2226 struct rb_node *mnode = &ms->gfn_node[slots->node_idx]; in kvm_s390_next_dirty_cmma()
2228 if (ms->base_gfn + ms->npages <= cur_gfn) { in kvm_s390_next_dirty_cmma()
2232 mnode = rb_first(&slots->gfn_tree); in kvm_s390_next_dirty_cmma()
2234 ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]); in kvm_s390_next_dirty_cmma()
2238 if (cur_gfn < ms->base_gfn) in kvm_s390_next_dirty_cmma()
2241 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs); in kvm_s390_next_dirty_cmma()
2242 while (ofs >= ms->npages && (mnode = rb_next(mnode))) { in kvm_s390_next_dirty_cmma()
2243 ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]); in kvm_s390_next_dirty_cmma()
2244 ofs = find_first_bit(kvm_second_dirty_bitmap(ms), ms->npages); in kvm_s390_next_dirty_cmma()
2246 return ms->base_gfn + ofs; in kvm_s390_next_dirty_cmma()
2259 cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn); in kvm_s390_get_cmma()
2261 args->count = 0; in kvm_s390_get_cmma()
2262 args->start_gfn = cur_gfn; in kvm_s390_get_cmma()
2268 while (args->count < bufsize) { in kvm_s390_get_cmma()
2273 if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms))) in kvm_s390_get_cmma()
2274 atomic64_dec(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma()
2275 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_get_cmma()
2278 res[args->count++] = (pgstev >> 24) & 0x43; in kvm_s390_get_cmma()
2287 (next_gfn - args->start_gfn >= bufsize)) in kvm_s390_get_cmma()
2291 if (cur_gfn - ms->base_gfn >= ms->npages) { in kvm_s390_get_cmma()
2315 if (!kvm->arch.use_cmma) in kvm_s390_get_cmma_bits()
2316 return -ENXIO; in kvm_s390_get_cmma_bits()
2318 if (args->flags & ~KVM_S390_CMMA_PEEK) in kvm_s390_get_cmma_bits()
2319 return -EINVAL; in kvm_s390_get_cmma_bits()
2320 /* Migration mode query, and we are not doing a migration */ in kvm_s390_get_cmma_bits()
2321 peek = !!(args->flags & KVM_S390_CMMA_PEEK); in kvm_s390_get_cmma_bits()
2322 if (!peek && !kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2323 return -EINVAL; in kvm_s390_get_cmma_bits()
2325 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX); in kvm_s390_get_cmma_bits()
2326 if (!bufsize || !kvm->mm->context.uses_cmm) { in kvm_s390_get_cmma_bits()
2331 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) { in kvm_s390_get_cmma_bits()
2338 return -ENOMEM; in kvm_s390_get_cmma_bits()
2340 mmap_read_lock(kvm->mm); in kvm_s390_get_cmma_bits()
2341 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_cmma_bits()
2346 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_cmma_bits()
2347 mmap_read_unlock(kvm->mm); in kvm_s390_get_cmma_bits()
2349 if (kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2350 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma_bits()
2352 args->remaining = 0; in kvm_s390_get_cmma_bits()
2354 if (copy_to_user((void __user *)args->values, values, args->count)) in kvm_s390_get_cmma_bits()
2355 ret = -EFAULT; in kvm_s390_get_cmma_bits()
2364 * set and the mm->context.uses_cmm flag is set.
2373 mask = args->mask; in kvm_s390_set_cmma_bits()
2375 if (!kvm->arch.use_cmma) in kvm_s390_set_cmma_bits()
2376 return -ENXIO; in kvm_s390_set_cmma_bits()
2378 if (args->flags != 0) in kvm_s390_set_cmma_bits()
2379 return -EINVAL; in kvm_s390_set_cmma_bits()
2381 if (args->count > KVM_S390_CMMA_SIZE_MAX) in kvm_s390_set_cmma_bits()
2382 return -EINVAL; in kvm_s390_set_cmma_bits()
2384 if (args->count == 0) in kvm_s390_set_cmma_bits()
2387 bits = vmalloc(array_size(sizeof(*bits), args->count)); in kvm_s390_set_cmma_bits()
2389 return -ENOMEM; in kvm_s390_set_cmma_bits()
2391 r = copy_from_user(bits, (void __user *)args->values, args->count); in kvm_s390_set_cmma_bits()
2393 r = -EFAULT; in kvm_s390_set_cmma_bits()
2397 mmap_read_lock(kvm->mm); in kvm_s390_set_cmma_bits()
2398 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_cmma_bits()
2399 for (i = 0; i < args->count; i++) { in kvm_s390_set_cmma_bits()
2400 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_cmma_bits()
2402 r = -EFAULT; in kvm_s390_set_cmma_bits()
2409 set_pgste_bits(kvm->mm, hva, mask, pgstev); in kvm_s390_set_cmma_bits()
2411 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_cmma_bits()
2412 mmap_read_unlock(kvm->mm); in kvm_s390_set_cmma_bits()
2414 if (!kvm->mm->context.uses_cmm) { in kvm_s390_set_cmma_bits()
2415 mmap_write_lock(kvm->mm); in kvm_s390_set_cmma_bits()
2416 kvm->mm->context.uses_cmm = 1; in kvm_s390_set_cmma_bits()
2417 mmap_write_unlock(kvm->mm); in kvm_s390_set_cmma_bits()
2425 * kvm_s390_cpus_from_pv - Convert all protected vCPUs in a protected VM to
2435 * Return: 0 in case of success, otherwise -EIO
2453 mutex_lock(&vcpu->mutex); in kvm_s390_cpus_from_pv()
2457 ret = -EIO; in kvm_s390_cpus_from_pv()
2459 mutex_unlock(&vcpu->mutex); in kvm_s390_cpus_from_pv()
2461 /* Ensure that we re-enable gisa if the non-PV guest used it but the PV guest did not. */ in kvm_s390_cpus_from_pv()
2468 * kvm_s390_cpus_to_pv - Convert all non-protected vCPUs in a protected VM
2476 * Return: 0 in case of success, otherwise -EIO
2491 mutex_lock(&vcpu->mutex); in kvm_s390_cpus_to_pv()
2493 mutex_unlock(&vcpu->mutex); in kvm_s390_cpus_to_pv()
2503 * Here we provide user space with a direct interface to query UV
2514 switch (info->header.id) { in kvm_s390_handle_pv_info()
2516 len_min = sizeof(info->header) + sizeof(info->vm); in kvm_s390_handle_pv_info()
2518 if (info->header.len_max < len_min) in kvm_s390_handle_pv_info()
2519 return -EINVAL; in kvm_s390_handle_pv_info()
2521 memcpy(info->vm.inst_calls_list, in kvm_s390_handle_pv_info()
2526 info->vm.max_cpus = uv_info.max_guest_cpu_id + 1; in kvm_s390_handle_pv_info()
2527 info->vm.max_guests = uv_info.max_num_sec_conf; in kvm_s390_handle_pv_info()
2528 info->vm.max_guest_addr = uv_info.max_sec_stor_addr; in kvm_s390_handle_pv_info()
2529 info->vm.feature_indication = uv_info.uv_feature_indications; in kvm_s390_handle_pv_info()
2534 len_min = sizeof(info->header) + sizeof(info->dump); in kvm_s390_handle_pv_info()
2536 if (info->header.len_max < len_min) in kvm_s390_handle_pv_info()
2537 return -EINVAL; in kvm_s390_handle_pv_info()
2539 info->dump.dump_cpu_buffer_len = uv_info.guest_cpu_stor_len; in kvm_s390_handle_pv_info()
2540 info->dump.dump_config_mem_buffer_per_1m = uv_info.conf_dump_storage_state_len; in kvm_s390_handle_pv_info()
2541 info->dump.dump_config_finalize_len = uv_info.conf_dump_finalize_len; in kvm_s390_handle_pv_info()
2545 return -EINVAL; in kvm_s390_handle_pv_info()
2552 int r = -EINVAL; in kvm_s390_pv_dmp()
2557 if (kvm->arch.pv.dumping) in kvm_s390_pv_dmp()
2567 UVC_CMD_DUMP_INIT, &cmd->rc, &cmd->rrc); in kvm_s390_pv_dmp()
2569 cmd->rc, cmd->rrc); in kvm_s390_pv_dmp()
2571 kvm->arch.pv.dumping = true; in kvm_s390_pv_dmp()
2574 r = -EINVAL; in kvm_s390_pv_dmp()
2579 if (!kvm->arch.pv.dumping) in kvm_s390_pv_dmp()
2588 &cmd->rc, &cmd->rrc); in kvm_s390_pv_dmp()
2592 if (!kvm->arch.pv.dumping) in kvm_s390_pv_dmp()
2595 r = -EINVAL; in kvm_s390_pv_dmp()
2600 &cmd->rc, &cmd->rrc); in kvm_s390_pv_dmp()
2604 r = -ENOTTY; in kvm_s390_pv_dmp()
2613 const bool need_lock = (cmd->cmd != KVM_PV_ASYNC_CLEANUP_PERFORM); in kvm_s390_handle_pv()
2614 void __user *argp = (void __user *)cmd->data; in kvm_s390_handle_pv()
2619 mutex_lock(&kvm->lock); in kvm_s390_handle_pv()
2621 switch (cmd->cmd) { in kvm_s390_handle_pv()
2623 r = -EINVAL; in kvm_s390_handle_pv()
2635 mmap_write_lock(current->mm); in kvm_s390_handle_pv()
2637 mmap_write_unlock(current->mm); in kvm_s390_handle_pv()
2641 r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2645 r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2650 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2654 r = -EINVAL; in kvm_s390_handle_pv()
2658 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2666 r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2669 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2672 r = -EINVAL; in kvm_s390_handle_pv()
2675 /* kvm->lock must not be held; this is asserted inside the function. */ in kvm_s390_handle_pv()
2676 r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2679 r = -EINVAL; in kvm_s390_handle_pv()
2683 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2691 r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2694 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2701 r = -EINVAL; in kvm_s390_handle_pv()
2705 r = -EFAULT; in kvm_s390_handle_pv()
2710 r = -EINVAL; in kvm_s390_handle_pv()
2714 r = -ENOMEM; in kvm_s390_handle_pv()
2719 r = -EFAULT; in kvm_s390_handle_pv()
2723 &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2731 r = -EINVAL; in kvm_s390_handle_pv()
2732 if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm)) in kvm_s390_handle_pv()
2735 r = -EFAULT; in kvm_s390_handle_pv()
2740 &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2744 r = -EINVAL; in kvm_s390_handle_pv()
2749 UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2750 KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc, in kvm_s390_handle_pv()
2751 cmd->rrc); in kvm_s390_handle_pv()
2755 r = -EINVAL; in kvm_s390_handle_pv()
2760 UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2762 cmd->rc, cmd->rrc); in kvm_s390_handle_pv()
2766 r = -EINVAL; in kvm_s390_handle_pv()
2771 UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2773 cmd->rc, cmd->rrc); in kvm_s390_handle_pv()
2783 * Maybe user space wants to query some of the data in kvm_s390_handle_pv()
2789 r = -EFAULT; in kvm_s390_handle_pv()
2793 r = -EINVAL; in kvm_s390_handle_pv()
2809 r = -EFAULT; in kvm_s390_handle_pv()
2819 r = -EINVAL; in kvm_s390_handle_pv()
2823 r = -EFAULT; in kvm_s390_handle_pv()
2832 r = -EFAULT; in kvm_s390_handle_pv()
2839 r = -ENOTTY; in kvm_s390_handle_pv()
2842 mutex_unlock(&kvm->lock); in kvm_s390_handle_pv()
2849 if (mop->flags & ~supported_flags || !mop->size) in mem_op_validate_common()
2850 return -EINVAL; in mem_op_validate_common()
2851 if (mop->size > MEM_OP_MAX_SIZE) in mem_op_validate_common()
2852 return -E2BIG; in mem_op_validate_common()
2853 if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) { in mem_op_validate_common()
2854 if (mop->key > 0xf) in mem_op_validate_common()
2855 return -EINVAL; in mem_op_validate_common()
2857 mop->key = 0; in mem_op_validate_common()
2864 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_vm_mem_op_abs()
2874 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) { in kvm_s390_vm_mem_op_abs()
2875 tmpbuf = vmalloc(mop->size); in kvm_s390_vm_mem_op_abs()
2877 return -ENOMEM; in kvm_s390_vm_mem_op_abs()
2880 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_vm_mem_op_abs()
2882 if (kvm_is_error_gpa(kvm, mop->gaddr)) { in kvm_s390_vm_mem_op_abs()
2887 acc_mode = mop->op == KVM_S390_MEMOP_ABSOLUTE_READ ? GACC_FETCH : GACC_STORE; in kvm_s390_vm_mem_op_abs()
2888 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { in kvm_s390_vm_mem_op_abs()
2889 r = check_gpa_range(kvm, mop->gaddr, mop->size, acc_mode, mop->key); in kvm_s390_vm_mem_op_abs()
2893 r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf, in kvm_s390_vm_mem_op_abs()
2894 mop->size, GACC_FETCH, mop->key); in kvm_s390_vm_mem_op_abs()
2897 if (copy_to_user(uaddr, tmpbuf, mop->size)) in kvm_s390_vm_mem_op_abs()
2898 r = -EFAULT; in kvm_s390_vm_mem_op_abs()
2900 if (copy_from_user(tmpbuf, uaddr, mop->size)) { in kvm_s390_vm_mem_op_abs()
2901 r = -EFAULT; in kvm_s390_vm_mem_op_abs()
2904 r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf, in kvm_s390_vm_mem_op_abs()
2905 mop->size, GACC_STORE, mop->key); in kvm_s390_vm_mem_op_abs()
2909 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_vm_mem_op_abs()
2917 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_vm_mem_op_cmpxchg()
2918 void __user *old_addr = (void __user *)mop->old_addr; in kvm_s390_vm_mem_op_cmpxchg()
2923 unsigned int off_in_quad = sizeof(new) - mop->size; in kvm_s390_vm_mem_op_cmpxchg()
2935 if (mop->size > sizeof(new)) in kvm_s390_vm_mem_op_cmpxchg()
2936 return -EINVAL; in kvm_s390_vm_mem_op_cmpxchg()
2937 if (copy_from_user(&new.raw[off_in_quad], uaddr, mop->size)) in kvm_s390_vm_mem_op_cmpxchg()
2938 return -EFAULT; in kvm_s390_vm_mem_op_cmpxchg()
2939 if (copy_from_user(&old.raw[off_in_quad], old_addr, mop->size)) in kvm_s390_vm_mem_op_cmpxchg()
2940 return -EFAULT; in kvm_s390_vm_mem_op_cmpxchg()
2942 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_vm_mem_op_cmpxchg()
2944 if (kvm_is_error_gpa(kvm, mop->gaddr)) { in kvm_s390_vm_mem_op_cmpxchg()
2949 r = cmpxchg_guest_abs_with_key(kvm, mop->gaddr, mop->size, &old.quad, in kvm_s390_vm_mem_op_cmpxchg()
2950 new.quad, mop->key, &success); in kvm_s390_vm_mem_op_cmpxchg()
2951 if (!success && copy_to_user(old_addr, &old.raw[off_in_quad], mop->size)) in kvm_s390_vm_mem_op_cmpxchg()
2952 r = -EFAULT; in kvm_s390_vm_mem_op_cmpxchg()
2955 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_vm_mem_op_cmpxchg()
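In the cmpxchg memop above, operands shorter than 16 bytes are right-aligned inside the 16-byte quad (off_in_quad = sizeof(new) - mop->size), so the compare-and-swap always acts on a full quad while only the low-order bytes carry user data. A small illustrative sketch of that alignment step (hypothetical helper, assuming kernel uaccess headers and a caller-validated size of 1..16):

/* Sketch: copy a 'size'-byte user operand into the rightmost bytes of a quad. */
union quad16 { __uint128_t quad; u8 raw[16]; };

static int load_right_aligned(union quad16 *dst, const void __user *src, unsigned int size)
{
	unsigned int off = sizeof(*dst) - size;	/* leading bytes stay zero */

	dst->quad = 0;
	if (copy_from_user(&dst->raw[off], src, size))
		return -EFAULT;
	return 0;
}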
2962 * This is technically a heuristic only, if the kvm->lock is not in kvm_s390_vm_mem_op()
2963 * taken, it is not guaranteed that the vm is/remains non-protected. in kvm_s390_vm_mem_op()
2965 * on the access, -EFAULT is returned and the vm may crash the in kvm_s390_vm_mem_op()
2971 return -EINVAL; in kvm_s390_vm_mem_op()
2973 switch (mop->op) { in kvm_s390_vm_mem_op()
2980 return -EINVAL; in kvm_s390_vm_mem_op()
2986 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
2995 r = -EFAULT; in kvm_arch_vm_ioctl()
3004 r = -EINVAL; in kvm_arch_vm_ioctl()
3005 if (kvm->arch.use_irqchip) { in kvm_arch_vm_ioctl()
3013 r = -EFAULT; in kvm_arch_vm_ioctl()
3020 r = -EFAULT; in kvm_arch_vm_ioctl()
3027 r = -EFAULT; in kvm_arch_vm_ioctl()
3036 r = -EFAULT; in kvm_arch_vm_ioctl()
3046 r = -EFAULT; in kvm_arch_vm_ioctl()
3056 r = -EFAULT; in kvm_arch_vm_ioctl()
3059 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3061 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3065 r = -EFAULT; in kvm_arch_vm_ioctl()
3072 r = -EFAULT; in kvm_arch_vm_ioctl()
3075 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3077 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3087 r = -EINVAL; in kvm_arch_vm_ioctl()
3091 r = -EFAULT; in kvm_arch_vm_ioctl()
3095 r = -EINVAL; in kvm_arch_vm_ioctl()
3098 /* must be called without kvm->lock */ in kvm_arch_vm_ioctl()
3101 r = -EFAULT; in kvm_arch_vm_ioctl()
3112 r = -EFAULT; in kvm_arch_vm_ioctl()
3118 r = -EINVAL; in kvm_arch_vm_ioctl()
3122 r = -EFAULT; in kvm_arch_vm_ioctl()
3129 r = -ENOTTY; in kvm_arch_vm_ioctl()
3157 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb; in kvm_s390_set_crycb_format()
3159 /* Clear the CRYCB format bits - i.e., set format 0 by default */ in kvm_s390_set_crycb_format()
3160 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK); in kvm_s390_set_crycb_format()
3167 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; in kvm_s390_set_crycb_format()
3169 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; in kvm_s390_set_crycb_format()
3184 * Note: The kvm->lock mutex must be locked by the caller before invoking this
3190 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb; in kvm_arch_crypto_set_masks()
3194 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) { in kvm_arch_crypto_set_masks()
3196 memcpy(crycb->apcb1.apm, apm, 32); in kvm_arch_crypto_set_masks()
3199 memcpy(crycb->apcb1.aqm, aqm, 32); in kvm_arch_crypto_set_masks()
3202 memcpy(crycb->apcb1.adm, adm, 32); in kvm_arch_crypto_set_masks()
3208 memcpy(crycb->apcb0.apm, apm, 8); in kvm_arch_crypto_set_masks()
3209 memcpy(crycb->apcb0.aqm, aqm, 2); in kvm_arch_crypto_set_masks()
3210 memcpy(crycb->apcb0.adm, adm, 2); in kvm_arch_crypto_set_masks()
3234 * Note: The kvm->lock mutex must be locked by the caller before invoking this
3241 memset(&kvm->arch.crypto.crycb->apcb0, 0, in kvm_arch_crypto_clear_masks()
3242 sizeof(kvm->arch.crypto.crycb->apcb0)); in kvm_arch_crypto_clear_masks()
3243 memset(&kvm->arch.crypto.crycb->apcb1, 0, in kvm_arch_crypto_clear_masks()
3244 sizeof(kvm->arch.crypto.crycb->apcb1)); in kvm_arch_crypto_clear_masks()
3264 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb; in kvm_s390_crypto_init()
3266 init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem); in kvm_s390_crypto_init()
3272 kvm->arch.crypto.aes_kw = 1; in kvm_s390_crypto_init()
3273 kvm->arch.crypto.dea_kw = 1; in kvm_s390_crypto_init()
3274 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_crypto_init()
3275 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_crypto_init()
3276 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_crypto_init()
3277 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_crypto_init()
3282 if (kvm->arch.use_esca) in sca_dispose()
3283 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block)); in sca_dispose()
3285 free_page((unsigned long)(kvm->arch.sca)); in sca_dispose()
3286 kvm->arch.sca = NULL; in sca_dispose()
3304 rc = -EINVAL; in kvm_arch_init_vm()
3319 rc = -ENOMEM; in kvm_arch_init_vm()
3323 rwlock_init(&kvm->arch.sca_lock); in kvm_arch_init_vm()
3325 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags); in kvm_arch_init_vm()
3326 if (!kvm->arch.sca) in kvm_arch_init_vm()
3332 kvm->arch.sca = (struct bsca_block *) in kvm_arch_init_vm()
3333 ((char *) kvm->arch.sca + sca_offset); in kvm_arch_init_vm()
3336 sprintf(debug_name, "kvm-%u", current->pid); in kvm_arch_init_vm()
3338 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long)); in kvm_arch_init_vm()
3339 if (!kvm->arch.dbf) in kvm_arch_init_vm()
3343 kvm->arch.sie_page2 = in kvm_arch_init_vm()
3345 if (!kvm->arch.sie_page2) in kvm_arch_init_vm()
3348 kvm->arch.sie_page2->kvm = kvm; in kvm_arch_init_vm()
3349 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list; in kvm_arch_init_vm()
3352 kvm->arch.model.fac_mask[i] = stfle_fac_list[i] & in kvm_arch_init_vm()
3355 kvm->arch.model.fac_list[i] = stfle_fac_list[i] & in kvm_arch_init_vm()
3358 kvm->arch.model.subfuncs = kvm_s390_available_subfunc; in kvm_arch_init_vm()
3360 /* we are always in czam mode - even on pre z14 machines */ in kvm_arch_init_vm()
3361 set_kvm_facility(kvm->arch.model.fac_mask, 138); in kvm_arch_init_vm()
3362 set_kvm_facility(kvm->arch.model.fac_list, 138); in kvm_arch_init_vm()
3364 set_kvm_facility(kvm->arch.model.fac_mask, 74); in kvm_arch_init_vm()
3365 set_kvm_facility(kvm->arch.model.fac_list, 74); in kvm_arch_init_vm()
3367 set_kvm_facility(kvm->arch.model.fac_mask, 147); in kvm_arch_init_vm()
3368 set_kvm_facility(kvm->arch.model.fac_list, 147); in kvm_arch_init_vm()
3372 set_kvm_facility(kvm->arch.model.fac_mask, 65); in kvm_arch_init_vm()
3374 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid(); in kvm_arch_init_vm()
3375 kvm->arch.model.ibc = sclp.ibc & 0x0fff; in kvm_arch_init_vm()
3377 kvm->arch.model.uv_feat_guest.feat = 0; in kvm_arch_init_vm()
3382 mutex_lock(&kvm->lock); in kvm_arch_init_vm()
3385 mutex_unlock(&kvm->lock); in kvm_arch_init_vm()
3388 mutex_init(&kvm->arch.float_int.ais_lock); in kvm_arch_init_vm()
3389 spin_lock_init(&kvm->arch.float_int.lock); in kvm_arch_init_vm()
3391 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); in kvm_arch_init_vm()
3392 init_waitqueue_head(&kvm->arch.ipte_wq); in kvm_arch_init_vm()
3393 mutex_init(&kvm->arch.ipte_mutex); in kvm_arch_init_vm()
3395 debug_register_view(kvm->arch.dbf, &debug_sprintf_view); in kvm_arch_init_vm()
3399 kvm->arch.gmap = NULL; in kvm_arch_init_vm()
3400 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT; in kvm_arch_init_vm()
3403 kvm->arch.mem_limit = TASK_SIZE_MAX; in kvm_arch_init_vm()
3405 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX, in kvm_arch_init_vm()
3407 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1); in kvm_arch_init_vm()
3408 if (!kvm->arch.gmap) in kvm_arch_init_vm()
3410 kvm->arch.gmap->private = kvm; in kvm_arch_init_vm()
3411 kvm->arch.gmap->pfault_enabled = 0; in kvm_arch_init_vm()
3414 kvm->arch.use_pfmfi = sclp.has_pfmfi; in kvm_arch_init_vm()
3415 kvm->arch.use_skf = sclp.has_skey; in kvm_arch_init_vm()
3416 spin_lock_init(&kvm->arch.start_stop_lock); in kvm_arch_init_vm()
3420 INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup); in kvm_arch_init_vm()
3421 kvm->arch.pv.set_aside = NULL; in kvm_arch_init_vm()
3422 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid); in kvm_arch_init_vm()
3426 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_init_vm()
3427 debug_unregister(kvm->arch.dbf); in kvm_arch_init_vm()
3438 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id); in kvm_arch_vcpu_destroy()
3441 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
3443 kvm_s390_update_topology_change_report(vcpu->kvm, 1); in kvm_arch_vcpu_destroy()
3445 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
3446 gmap_remove(vcpu->arch.gmap); in kvm_arch_vcpu_destroy()
3448 if (vcpu->kvm->arch.use_cmma) in kvm_arch_vcpu_destroy()
3453 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_destroy()
3464 * We are already at the end of life and kvm->lock is not taken. in kvm_arch_destroy_vm()
3475 if (kvm->arch.pv.mmu_notifier.ops) in kvm_arch_destroy_vm()
3476 mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm); in kvm_arch_destroy_vm()
3478 debug_unregister(kvm->arch.dbf); in kvm_arch_destroy_vm()
3479 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_destroy_vm()
3481 gmap_remove(kvm->arch.gmap); in kvm_arch_destroy_vm()
3491 vcpu->arch.gmap = gmap_create(current->mm, -1UL); in __kvm_ucontrol_vcpu_init()
3492 if (!vcpu->arch.gmap) in __kvm_ucontrol_vcpu_init()
3493 return -ENOMEM; in __kvm_ucontrol_vcpu_init()
3494 vcpu->arch.gmap->private = vcpu->kvm; in __kvm_ucontrol_vcpu_init()
3503 read_lock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
3504 if (vcpu->kvm->arch.use_esca) { in sca_del_vcpu()
3505 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
3507 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn); in sca_del_vcpu()
3508 sca->cpu[vcpu->vcpu_id].sda = 0; in sca_del_vcpu()
3510 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
3512 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn); in sca_del_vcpu()
3513 sca->cpu[vcpu->vcpu_id].sda = 0; in sca_del_vcpu()
3515 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
3521 phys_addr_t sca_phys = virt_to_phys(vcpu->kvm->arch.sca); in sca_add_vcpu()
3524 vcpu->arch.sie_block->scaoh = sca_phys >> 32; in sca_add_vcpu()
3525 vcpu->arch.sie_block->scaol = sca_phys; in sca_add_vcpu()
3528 read_lock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
3529 if (vcpu->kvm->arch.use_esca) { in sca_add_vcpu()
3530 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
3533 sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block); in sca_add_vcpu()
3534 vcpu->arch.sie_block->scaoh = sca_phys >> 32; in sca_add_vcpu()
3535 vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK; in sca_add_vcpu()
3536 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; in sca_add_vcpu()
3537 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn); in sca_add_vcpu()
3539 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
3542 sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block); in sca_add_vcpu()
3543 vcpu->arch.sie_block->scaoh = sca_phys >> 32; in sca_add_vcpu()
3544 vcpu->arch.sie_block->scaol = sca_phys; in sca_add_vcpu()
3545 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn); in sca_add_vcpu()
3547 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
3553 d->sda = s->sda; in sca_copy_entry()
3554 d->sigp_ctrl.c = s->sigp_ctrl.c; in sca_copy_entry()
3555 d->sigp_ctrl.scn = s->sigp_ctrl.scn; in sca_copy_entry()
3562 d->ipte_control = s->ipte_control; in sca_copy_b_to_e()
3563 d->mcn[0] = s->mcn; in sca_copy_b_to_e()
3565 sca_copy_entry(&d->cpu[i], &s->cpu[i]); in sca_copy_b_to_e()
3570 struct bsca_block *old_sca = kvm->arch.sca; in sca_switch_to_extended()
3577 if (kvm->arch.use_esca) in sca_switch_to_extended()
3582 return -ENOMEM; in sca_switch_to_extended()
3589 write_lock(&kvm->arch.sca_lock); in sca_switch_to_extended()
3594 vcpu->arch.sie_block->scaoh = scaoh; in sca_switch_to_extended()
3595 vcpu->arch.sie_block->scaol = scaol; in sca_switch_to_extended()
3596 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; in sca_switch_to_extended()
3598 kvm->arch.sca = new_sca; in sca_switch_to_extended()
3599 kvm->arch.use_esca = 1; in sca_switch_to_extended()
3601 write_unlock(&kvm->arch.sca_lock); in sca_switch_to_extended()
3606 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)", in sca_switch_to_extended()
3607 old_sca, kvm->arch.sca); in sca_switch_to_extended()
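/*
 * Annotation (not part of the original source): the basic SCA (bsca_block)
 * only has entries for 64 VCPUs, while the extended SCA (esca_block) covers
 * the full supported VCPU count.  The switch is done lazily when a VCPU that
 * no longer fits the basic SCA is created: all VCPUs are kicked out of SIE,
 * the old entries are copied over, every SIE block is rewired to the new
 * origin via scaoh/scaol, and ECB2_ESCA tells the hardware to interpret the
 * new format.
 */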
3625 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm); in sca_can_add_vcpu()
3633 WARN_ON_ONCE(vcpu->arch.cputm_start != 0); in __start_cpu_timer_accounting()
3634 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in __start_cpu_timer_accounting()
3635 vcpu->arch.cputm_start = get_tod_clock_fast(); in __start_cpu_timer_accounting()
3636 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in __start_cpu_timer_accounting()
3642 WARN_ON_ONCE(vcpu->arch.cputm_start == 0); in __stop_cpu_timer_accounting()
3643 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in __stop_cpu_timer_accounting()
3644 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start; in __stop_cpu_timer_accounting()
3645 vcpu->arch.cputm_start = 0; in __stop_cpu_timer_accounting()
3646 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in __stop_cpu_timer_accounting()
3652 WARN_ON_ONCE(vcpu->arch.cputm_enabled); in __enable_cpu_timer_accounting()
3653 vcpu->arch.cputm_enabled = true; in __enable_cpu_timer_accounting()
3660 WARN_ON_ONCE(!vcpu->arch.cputm_enabled); in __disable_cpu_timer_accounting()
3662 vcpu->arch.cputm_enabled = false; in __disable_cpu_timer_accounting()
3679 /* set the cpu timer - may only be called from the VCPU thread itself */
3683 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in kvm_s390_set_cpu_timer()
3684 if (vcpu->arch.cputm_enabled) in kvm_s390_set_cpu_timer()
3685 vcpu->arch.cputm_start = get_tod_clock_fast(); in kvm_s390_set_cpu_timer()
3686 vcpu->arch.sie_block->cputm = cputm; in kvm_s390_set_cpu_timer()
3687 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in kvm_s390_set_cpu_timer()
3691 /* update and get the cpu timer - can also be called from other VCPU threads */
3697 if (unlikely(!vcpu->arch.cputm_enabled)) in kvm_s390_get_cpu_timer()
3698 return vcpu->arch.sie_block->cputm; in kvm_s390_get_cpu_timer()
3702 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount); in kvm_s390_get_cpu_timer()
3707 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu); in kvm_s390_get_cpu_timer()
3708 value = vcpu->arch.sie_block->cputm; in kvm_s390_get_cpu_timer()
3710 if (likely(vcpu->arch.cputm_start)) in kvm_s390_get_cpu_timer()
3711 value -= get_tod_clock_fast() - vcpu->arch.cputm_start; in kvm_s390_get_cpu_timer()
3712 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1)); in kvm_s390_get_cpu_timer()
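/*
 * Annotation (not part of the original source): the VCPU thread is the only
 * writer of cputm/cputm_start and brackets every update with
 * raw_write_seqcount_begin()/raw_write_seqcount_end(), which leaves the
 * sequence count odd while an update is in flight.  The lockless reader
 * above samples the sequence, reads cputm and cputm_start, and retries via
 * read_seqcount_retry() whenever the sequence was odd or has changed, so it
 * never returns a half-updated timer value and never needs a lock.
 */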
3720 gmap_enable(vcpu->arch.enabled_gmap); in kvm_arch_vcpu_load()
3722 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) in kvm_arch_vcpu_load()
3724 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
3729 vcpu->cpu = -1; in kvm_arch_vcpu_put()
3730 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) in kvm_arch_vcpu_put()
3733 vcpu->arch.enabled_gmap = gmap_get_enabled(); in kvm_arch_vcpu_put()
3734 gmap_disable(vcpu->arch.enabled_gmap); in kvm_arch_vcpu_put()
3740 mutex_lock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
3742 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; in kvm_arch_vcpu_postcreate()
3743 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx; in kvm_arch_vcpu_postcreate()
3745 mutex_unlock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
3746 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_postcreate()
3747 vcpu->arch.gmap = vcpu->kvm->arch.gmap; in kvm_arch_vcpu_postcreate()
3750 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0) in kvm_arch_vcpu_postcreate()
3751 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC; in kvm_arch_vcpu_postcreate()
3753 vcpu->arch.enabled_gmap = vcpu->arch.gmap; in kvm_arch_vcpu_postcreate()
3758 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) && in kvm_has_pckmo_subfunc()
3781 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76)) in kvm_s390_vcpu_crypto_setup()
3784 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; in kvm_s390_vcpu_crypto_setup()
3785 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA); in kvm_s390_vcpu_crypto_setup()
3786 vcpu->arch.sie_block->eca &= ~ECA_APIE; in kvm_s390_vcpu_crypto_setup()
3787 vcpu->arch.sie_block->ecd &= ~ECD_ECC; in kvm_s390_vcpu_crypto_setup()
3789 if (vcpu->kvm->arch.crypto.apie) in kvm_s390_vcpu_crypto_setup()
3790 vcpu->arch.sie_block->eca |= ECA_APIE; in kvm_s390_vcpu_crypto_setup()
3793 if (vcpu->kvm->arch.crypto.aes_kw) { in kvm_s390_vcpu_crypto_setup()
3794 vcpu->arch.sie_block->ecb3 |= ECB3_AES; in kvm_s390_vcpu_crypto_setup()
3796 if (kvm_has_pckmo_ecc(vcpu->kvm)) in kvm_s390_vcpu_crypto_setup()
3797 vcpu->arch.sie_block->ecd |= ECD_ECC; in kvm_s390_vcpu_crypto_setup()
3800 if (vcpu->kvm->arch.crypto.dea_kw) in kvm_s390_vcpu_crypto_setup()
3801 vcpu->arch.sie_block->ecb3 |= ECB3_DEA; in kvm_s390_vcpu_crypto_setup()
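/*
 * Annotation (not part of the original source): the clear/set sequence above
 * mirrors the VM-wide crypto configuration into the per-VCPU SIE controls:
 * ECB3_AES / ECB3_DEA expose the AES / DEA protected-key wrapping keys
 * configured for the VM, ECD_ECC additionally covers the ECC PCKMO
 * subfunctions, and ECA_APIE lets the guest issue AP instructions when APIE
 * was enabled for the VM.
 */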
3806 free_page((unsigned long)phys_to_virt(vcpu->arch.sie_block->cbrlo)); in kvm_s390_vcpu_unsetup_cmma()
3807 vcpu->arch.sie_block->cbrlo = 0; in kvm_s390_vcpu_unsetup_cmma()
3815 return -ENOMEM; in kvm_s390_vcpu_setup_cmma()
3817 vcpu->arch.sie_block->cbrlo = virt_to_phys(cbrlo_page); in kvm_s390_vcpu_setup_cmma()
3823 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; in kvm_s390_vcpu_setup_model()
3825 vcpu->arch.sie_block->ibc = model->ibc; in kvm_s390_vcpu_setup_model()
3826 if (test_kvm_facility(vcpu->kvm, 7)) in kvm_s390_vcpu_setup_model()
3827 vcpu->arch.sie_block->fac = virt_to_phys(model->fac_list); in kvm_s390_vcpu_setup_model()
3835 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | in kvm_s390_vcpu_setup()
3839 if (test_kvm_facility(vcpu->kvm, 78)) in kvm_s390_vcpu_setup()
3841 else if (test_kvm_facility(vcpu->kvm, 8)) in kvm_s390_vcpu_setup()
3848 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT; in kvm_s390_vcpu_setup()
3849 if (test_kvm_facility(vcpu->kvm, 9)) in kvm_s390_vcpu_setup()
3850 vcpu->arch.sie_block->ecb |= ECB_SRSI; in kvm_s390_vcpu_setup()
3851 if (test_kvm_facility(vcpu->kvm, 11)) in kvm_s390_vcpu_setup()
3852 vcpu->arch.sie_block->ecb |= ECB_PTF; in kvm_s390_vcpu_setup()
3853 if (test_kvm_facility(vcpu->kvm, 73)) in kvm_s390_vcpu_setup()
3854 vcpu->arch.sie_block->ecb |= ECB_TE; in kvm_s390_vcpu_setup()
3855 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_s390_vcpu_setup()
3856 vcpu->arch.sie_block->ecb |= ECB_SPECI; in kvm_s390_vcpu_setup()
3858 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi) in kvm_s390_vcpu_setup()
3859 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI; in kvm_s390_vcpu_setup()
3860 if (test_kvm_facility(vcpu->kvm, 130)) in kvm_s390_vcpu_setup()
3861 vcpu->arch.sie_block->ecb2 |= ECB2_IEP; in kvm_s390_vcpu_setup()
3862 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI; in kvm_s390_vcpu_setup()
3864 vcpu->arch.sie_block->eca |= ECA_CEI; in kvm_s390_vcpu_setup()
3866 vcpu->arch.sie_block->eca |= ECA_IB; in kvm_s390_vcpu_setup()
3868 vcpu->arch.sie_block->eca |= ECA_SII; in kvm_s390_vcpu_setup()
3870 vcpu->arch.sie_block->eca |= ECA_SIGPI; in kvm_s390_vcpu_setup()
3871 if (test_kvm_facility(vcpu->kvm, 129)) { in kvm_s390_vcpu_setup()
3872 vcpu->arch.sie_block->eca |= ECA_VX; in kvm_s390_vcpu_setup()
3873 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; in kvm_s390_vcpu_setup()
3875 if (test_kvm_facility(vcpu->kvm, 139)) in kvm_s390_vcpu_setup()
3876 vcpu->arch.sie_block->ecd |= ECD_MEF; in kvm_s390_vcpu_setup()
3877 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_s390_vcpu_setup()
3878 vcpu->arch.sie_block->ecd |= ECD_ETOKENF; in kvm_s390_vcpu_setup()
3879 if (vcpu->arch.sie_block->gd) { in kvm_s390_vcpu_setup()
3880 vcpu->arch.sie_block->eca |= ECA_AIV; in kvm_s390_vcpu_setup()
3881 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u", in kvm_s390_vcpu_setup()
3882 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id); in kvm_s390_vcpu_setup()
3884 vcpu->arch.sie_block->sdnxo = virt_to_phys(&vcpu->run->s.regs.sdnx) | SDNXC; in kvm_s390_vcpu_setup()
3885 vcpu->arch.sie_block->riccbd = virt_to_phys(&vcpu->run->s.regs.riccb); in kvm_s390_vcpu_setup()
3890 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; in kvm_s390_vcpu_setup()
3892 if (vcpu->kvm->arch.use_cmma) { in kvm_s390_vcpu_setup()
3897 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in kvm_s390_vcpu_setup()
3898 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; in kvm_s390_vcpu_setup()
3900 vcpu->arch.sie_block->hpid = HPID_KVM; in kvm_s390_vcpu_setup()
3906 mutex_lock(&vcpu->kvm->lock); in kvm_s390_vcpu_setup()
3907 if (kvm_s390_pv_is_protected(vcpu->kvm)) { in kvm_s390_vcpu_setup()
3912 mutex_unlock(&vcpu->kvm->lock); in kvm_s390_vcpu_setup()
3920 return -EINVAL; in kvm_arch_vcpu_precreate()
3932 return -ENOMEM; in kvm_arch_vcpu_create()
3934 vcpu->arch.sie_block = &sie_page->sie_block; in kvm_arch_vcpu_create()
3935 vcpu->arch.sie_block->itdba = virt_to_phys(&sie_page->itdb); in kvm_arch_vcpu_create()
3938 vcpu->arch.sie_block->mso = 0; in kvm_arch_vcpu_create()
3939 vcpu->arch.sie_block->msl = sclp.hamax; in kvm_arch_vcpu_create()
3941 vcpu->arch.sie_block->icpua = vcpu->vcpu_id; in kvm_arch_vcpu_create()
3942 spin_lock_init(&vcpu->arch.local_int.lock); in kvm_arch_vcpu_create()
3943 vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm); in kvm_arch_vcpu_create()
3944 seqcount_init(&vcpu->arch.cputm_seqcount); in kvm_arch_vcpu_create()
3946 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_arch_vcpu_create()
3948 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX | in kvm_arch_vcpu_create()
3956 if (test_kvm_facility(vcpu->kvm, 64)) in kvm_arch_vcpu_create()
3957 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB; in kvm_arch_vcpu_create()
3958 if (test_kvm_facility(vcpu->kvm, 82)) in kvm_arch_vcpu_create()
3959 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC; in kvm_arch_vcpu_create()
3960 if (test_kvm_facility(vcpu->kvm, 133)) in kvm_arch_vcpu_create()
3961 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB; in kvm_arch_vcpu_create()
3962 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_arch_vcpu_create()
3963 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN; in kvm_arch_vcpu_create()
3968 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS; in kvm_arch_vcpu_create()
3970 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS; in kvm_arch_vcpu_create()
3972 if (kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_create()
3978 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", in kvm_arch_vcpu_create()
3979 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block); in kvm_arch_vcpu_create()
3980 trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block); in kvm_arch_vcpu_create()
3986 kvm_s390_update_topology_change_report(vcpu->kvm, 1); in kvm_arch_vcpu_create()
3990 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_create()
3991 gmap_remove(vcpu->arch.gmap); in kvm_arch_vcpu_create()
3993 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_create()
3999 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); in kvm_arch_vcpu_runnable()
4005 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE); in kvm_arch_vcpu_in_kernel()
4010 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_block()
4016 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_unblock()
4021 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_request()
4027 return atomic_read(&vcpu->arch.sie_block->prog20) & in kvm_s390_vcpu_sie_inhibited()
4033 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_request_handled()
4044 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) in exit_sie()
4058 struct kvm *kvm = gmap->private; in kvm_gmap_notifier()
4071 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) { in kvm_gmap_notifier()
4072 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx", in kvm_gmap_notifier()
4084 vcpu->stat.halt_no_poll_steal++; in kvm_arch_no_poll()
4100 int r = -EINVAL; in kvm_arch_vcpu_ioctl_get_one_reg()
4102 switch (reg->id) { in kvm_arch_vcpu_ioctl_get_one_reg()
4104 r = put_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_get_one_reg()
4105 (u32 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4108 r = put_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_get_one_reg()
4109 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4113 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4116 r = put_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_get_one_reg()
4117 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4120 r = put_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_get_one_reg()
4121 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4124 r = put_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_get_one_reg()
4125 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4128 r = put_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_get_one_reg()
4129 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4132 r = put_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_get_one_reg()
4133 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4136 r = put_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_get_one_reg()
4137 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4149 int r = -EINVAL; in kvm_arch_vcpu_ioctl_set_one_reg()
4152 switch (reg->id) { in kvm_arch_vcpu_ioctl_set_one_reg()
4154 r = get_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_set_one_reg()
4155 (u32 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4158 r = get_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_set_one_reg()
4159 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4162 r = get_user(val, (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4167 r = get_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_set_one_reg()
4168 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4171 r = get_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_set_one_reg()
4172 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4173 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_vcpu_ioctl_set_one_reg()
4177 r = get_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_set_one_reg()
4178 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4181 r = get_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_set_one_reg()
4182 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4185 r = get_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_set_one_reg()
4186 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4189 r = get_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_set_one_reg()
4190 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
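/*
 * Annotation (not part of the original source): the registers handled above
 * are exposed to userspace through the generic ONE_REG interface.  The
 * following is an illustrative userspace sketch only; it assumes an already
 * created VCPU file descriptor, and struct kvm_one_reg, KVM_GET_ONE_REG and
 * KVM_REG_S390_CPU_TIMER come from the kernel's uapi headers.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

static int read_cpu_timer(int vcpu_fd, uint64_t *cputm)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_S390_CPU_TIMER,
		.addr = (uint64_t)(unsigned long)cputm,	/* value is copied here */
	};

	/* 0 on success, -1 with errno set otherwise */
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}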
4201 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI; in kvm_arch_vcpu_ioctl_normal_reset()
4202 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_arch_vcpu_ioctl_normal_reset()
4203 memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb)); in kvm_arch_vcpu_ioctl_normal_reset()
4206 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) in kvm_arch_vcpu_ioctl_normal_reset()
4220 vcpu->arch.sie_block->gpsw.mask = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4221 vcpu->arch.sie_block->gpsw.addr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4224 vcpu->arch.sie_block->ckc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4225 memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr)); in kvm_arch_vcpu_ioctl_initial_reset()
4226 vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
4227 vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
4230 memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs)); in kvm_arch_vcpu_ioctl_initial_reset()
4231 vcpu->run->s.regs.ckc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4232 vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
4233 vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
4234 vcpu->run->psw_addr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4235 vcpu->run->psw_mask = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4236 vcpu->run->s.regs.todpr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4237 vcpu->run->s.regs.cputm = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4238 vcpu->run->s.regs.ckc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4239 vcpu->run->s.regs.pp = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4240 vcpu->run->s.regs.gbea = 1; in kvm_arch_vcpu_ioctl_initial_reset()
4241 vcpu->run->s.regs.fpc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4248 vcpu->arch.sie_block->gbea = 1; in kvm_arch_vcpu_ioctl_initial_reset()
4249 vcpu->arch.sie_block->pp = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4250 vcpu->arch.sie_block->fpf &= ~FPF_BPBC; in kvm_arch_vcpu_ioctl_initial_reset()
4251 vcpu->arch.sie_block->todpr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4257 struct kvm_sync_regs *regs = &vcpu->run->s.regs; in kvm_arch_vcpu_ioctl_clear_reset()
4262 memset(&regs->gprs, 0, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_clear_reset()
4263 memset(&regs->vrs, 0, sizeof(regs->vrs)); in kvm_arch_vcpu_ioctl_clear_reset()
4264 memset(&regs->acrs, 0, sizeof(regs->acrs)); in kvm_arch_vcpu_ioctl_clear_reset()
4265 memset(&regs->gscb, 0, sizeof(regs->gscb)); in kvm_arch_vcpu_ioctl_clear_reset()
4267 regs->etoken = 0; in kvm_arch_vcpu_ioctl_clear_reset()
4268 regs->etoken_extension = 0; in kvm_arch_vcpu_ioctl_clear_reset()
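/*
 * Annotation (not part of the original source): the three reset handlers
 * above follow the architected reset hierarchy - a clear reset implies an
 * initial CPU reset, which in turn implies a normal CPU reset.  That is why
 * kvm_arch_vcpu_ioctl_clear_reset() only has to zero the remaining register
 * state (GPRs, VRs, ACRs, GSCB and etokens) on top of what the initial reset
 * already cleared (PSW, control registers, timers and prefix-related state).
 */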
4274 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_set_regs()
4282 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_get_regs()
4292 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs)); in kvm_arch_vcpu_ioctl_set_sregs()
4293 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_set_sregs()
4304 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs)); in kvm_arch_vcpu_ioctl_get_sregs()
4305 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_get_sregs()
4317 vcpu->run->s.regs.fpc = fpu->fpc; in kvm_arch_vcpu_ioctl_set_fpu()
4319 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs, in kvm_arch_vcpu_ioctl_set_fpu()
4320 (freg_t *) fpu->fprs); in kvm_arch_vcpu_ioctl_set_fpu()
4322 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs)); in kvm_arch_vcpu_ioctl_set_fpu()
4335 convert_vx_to_fp((freg_t *) fpu->fprs, in kvm_arch_vcpu_ioctl_get_fpu()
4336 (__vector128 *) vcpu->run->s.regs.vrs); in kvm_arch_vcpu_ioctl_get_fpu()
4338 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs)); in kvm_arch_vcpu_ioctl_get_fpu()
4339 fpu->fpc = vcpu->run->s.regs.fpc; in kvm_arch_vcpu_ioctl_get_fpu()
4350 rc = -EBUSY; in kvm_arch_vcpu_ioctl_set_initial_psw()
4352 vcpu->run->psw_mask = psw.mask; in kvm_arch_vcpu_ioctl_set_initial_psw()
4353 vcpu->run->psw_addr = psw.addr; in kvm_arch_vcpu_ioctl_set_initial_psw()
4361 return -EINVAL; /* not implemented yet */ in kvm_arch_vcpu_ioctl_translate()
4375 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
4378 if (dbg->control & ~VALID_GUESTDBG_FLAGS) { in kvm_arch_vcpu_ioctl_set_guest_debug()
4379 rc = -EINVAL; in kvm_arch_vcpu_ioctl_set_guest_debug()
4383 rc = -EINVAL; in kvm_arch_vcpu_ioctl_set_guest_debug()
4387 if (dbg->control & KVM_GUESTDBG_ENABLE) { in kvm_arch_vcpu_ioctl_set_guest_debug()
4388 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
4392 if (dbg->control & KVM_GUESTDBG_USE_HW_BP) in kvm_arch_vcpu_ioctl_set_guest_debug()
4396 vcpu->arch.guestdbg.last_bp = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
4400 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
4432 /* user space knows about this interface - let it control the state */ in kvm_arch_vcpu_ioctl_set_mpstate()
4433 kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm); in kvm_arch_vcpu_ioctl_set_mpstate()
4435 switch (mp_state->mp_state) { in kvm_arch_vcpu_ioctl_set_mpstate()
4444 rc = -ENXIO; in kvm_arch_vcpu_ioctl_set_mpstate()
4452 rc = -ENXIO; in kvm_arch_vcpu_ioctl_set_mpstate()
4471 * If the guest prefix changed, re-arm the ipte notifier for the in kvm_s390_handle_requests()
4479 rc = gmap_mprotect_notify(vcpu->arch.gmap, in kvm_s390_handle_requests()
4490 vcpu->arch.sie_block->ihcpu = 0xffff; in kvm_s390_handle_requests()
4496 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); in kvm_s390_handle_requests()
4504 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0); in kvm_s390_handle_requests()
4511 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC; in kvm_s390_handle_requests()
4521 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA; in kvm_s390_handle_requests()
4527 * Re-enable CMM virtualization if CMMA is available and in kvm_s390_handle_requests()
4530 if ((vcpu->kvm->arch.use_cmma) && in kvm_s390_handle_requests()
4531 (vcpu->kvm->mm->context.uses_cmm)) in kvm_s390_handle_requests()
4532 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA; in kvm_s390_handle_requests()
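/*
 * Annotation (not part of the original source): ECB2_CMMA lets the guest
 * execute ESSA (collaborative memory management) without intercepts.  It is
 * cleared while migration mode is active so that ESSA is intercepted and
 * page states can be tracked, and re-enabled here only if CMMA is in use for
 * the VM and the host mm has actually enabled CMM (uses_cmm).
 */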
4552 kvm->arch.epoch = gtod->tod - clk.tod; in __kvm_s390_set_tod_clock()
4553 kvm->arch.epdx = 0; in __kvm_s390_set_tod_clock()
4555 kvm->arch.epdx = gtod->epoch_idx - clk.ei; in __kvm_s390_set_tod_clock()
4556 if (kvm->arch.epoch > gtod->tod) in __kvm_s390_set_tod_clock()
4557 kvm->arch.epdx -= 1; in __kvm_s390_set_tod_clock()
4562 vcpu->arch.sie_block->epoch = kvm->arch.epoch; in __kvm_s390_set_tod_clock()
4563 vcpu->arch.sie_block->epdx = kvm->arch.epdx; in __kvm_s390_set_tod_clock()
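/*
 * Annotation (not part of the original source): the guest TOD clock is kept
 * as an offset from the host clock.  With the multiple-epoch facility the
 * 8-bit epoch index extends the 64-bit TOD value, so the code above performs
 * a 72-bit subtraction: epoch = gtod->tod - host tod, epdx = epoch_idx -
 * host epoch index, minus one more when the low 64-bit subtraction wrapped
 * (detectable as kvm->arch.epoch > gtod->tod).  The result is then copied
 * into every VCPU's SIE block so all CPUs see the same guest clock.
 */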
4572 if (!mutex_trylock(&kvm->lock)) in kvm_s390_try_set_tod_clock()
4575 mutex_unlock(&kvm->lock); in kvm_s390_try_set_tod_clock()
4580 * kvm_arch_fault_in_page - fault-in guest page if necessary
4585 * Make sure that a guest page has been faulted-in on the host.
4591 return gmap_fault(vcpu->arch.gmap, gpa, in kvm_arch_fault_in_page()
4608 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); in __kvm_inject_pfault_token()
4615 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token); in kvm_arch_async_page_not_present()
4616 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token); in kvm_arch_async_page_not_present()
4624 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token); in kvm_arch_async_page_present()
4625 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token); in kvm_arch_async_page_present()
4648 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_setup_async_pf()
4650 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) != in kvm_arch_setup_async_pf()
4651 vcpu->arch.pfault_compare) in kvm_arch_setup_async_pf()
4657 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) in kvm_arch_setup_async_pf()
4659 if (!vcpu->arch.gmap->pfault_enabled) in kvm_arch_setup_async_pf()
4662 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr)); in kvm_arch_setup_async_pf()
4663 hva += current->thread.gmap_addr & ~PAGE_MASK; in kvm_arch_setup_async_pf()
4664 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) in kvm_arch_setup_async_pf()
4667 return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); in kvm_arch_setup_async_pf()
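/*
 * Annotation (not part of the original source): this is the asynchronous
 * page fault ("pfault") handshake.  The guest opts in via DIAG 0x258 and
 * provides the token address plus the compare/select masks cached in
 * vcpu->arch.  When resolving a host fault would block the VCPU, the token
 * is read from guest memory and a pfault-init external interrupt is queued
 * through __kvm_inject_pfault_token(); a matching pfault-done interrupt
 * follows once the page is present, so the guest can reschedule instead of
 * being stopped while the host resolves the fault.
 */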
4681 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14]; in vcpu_pre_run()
4682 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15]; in vcpu_pre_run()
4687 if (!kvm_is_ucontrol(vcpu->kvm)) { in vcpu_pre_run()
4702 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); in vcpu_pre_run()
4704 vcpu->arch.sie_block->icptcode = 0; in vcpu_pre_run()
4705 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); in vcpu_pre_run()
4731 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1); in vcpu_post_run_fault_in_sie()
4736 /* Instruction-Fetching Exceptions - we can't detect the ilen. in vcpu_post_run_fault_in_sie()
4740 pgm_info = vcpu->arch.pgm; in vcpu_post_run_fault_in_sie()
4754 vcpu->arch.sie_block->icptcode); in vcpu_post_run()
4755 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); in vcpu_post_run()
4760 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14; in vcpu_post_run()
4761 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15; in vcpu_post_run()
4763 if (exit_reason == -EINTR) { in vcpu_post_run()
4765 sie_page = container_of(vcpu->arch.sie_block, in vcpu_post_run()
4767 mcck_info = &sie_page->mcck_info; in vcpu_post_run()
4772 if (vcpu->arch.sie_block->icptcode > 0) { in vcpu_post_run()
4775 if (rc != -EOPNOTSUPP) in vcpu_post_run()
4777 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC; in vcpu_post_run()
4778 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; in vcpu_post_run()
4779 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; in vcpu_post_run()
4780 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; in vcpu_post_run()
4781 return -EREMOTE; in vcpu_post_run()
4782 } else if (exit_reason != -EFAULT) { in vcpu_post_run()
4783 vcpu->stat.exit_null++; in vcpu_post_run()
4785 } else if (kvm_is_ucontrol(vcpu->kvm)) { in vcpu_post_run()
4786 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL; in vcpu_post_run()
4787 vcpu->run->s390_ucontrol.trans_exc_code = in vcpu_post_run()
4788 current->thread.gmap_addr; in vcpu_post_run()
4789 vcpu->run->s390_ucontrol.pgm_code = 0x10; in vcpu_post_run()
4790 return -EREMOTE; in vcpu_post_run()
4791 } else if (current->thread.gmap_pfault) { in vcpu_post_run()
4793 current->thread.gmap_pfault = 0; in vcpu_post_run()
4796 vcpu->stat.pfault_sync++; in vcpu_post_run()
4797 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1); in vcpu_post_run()
4806 struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block; in __vcpu_run()
4809 * We try to hold kvm->srcu during most of vcpu_run (except when run- in __vcpu_run()
4829 memcpy(sie_page->pv_grregs, in __vcpu_run()
4830 vcpu->run->s.regs.gprs, in __vcpu_run()
4831 sizeof(sie_page->pv_grregs)); in __vcpu_run()
4835 exit_reason = sie64a(vcpu->arch.sie_block, in __vcpu_run()
4836 vcpu->run->s.regs.gprs); in __vcpu_run()
4838 memcpy(vcpu->run->s.regs.gprs, in __vcpu_run()
4839 sie_page->pv_grregs, in __vcpu_run()
4840 sizeof(sie_page->pv_grregs)); in __vcpu_run()
4843 * that leave the guest state in an "in-between" state in __vcpu_run()
4847 if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR || in __vcpu_run()
4848 vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) { in __vcpu_run()
4849 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK; in __vcpu_run()
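/*
 * Annotation (not part of the original source): for protected (PV) guests
 * KVM cannot read guest registers directly, so the general purpose registers
 * are exchanged with the ultravisor through the pv_grregs area of the SIE
 * page around every SIE entry and exit, as done above.  After
 * ICPT_PV_INSTR / ICPT_PV_PREF intercepts the guest is in an "in-between"
 * state that will be continued on the next SIE entry, so the interrupt mask
 * bits are cleared from the shadow PSW to keep KVM from injecting interrupts
 * in the middle of that operation.
 */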
4867 struct kvm_run *kvm_run = vcpu->run; in sync_regs_fmt2()
4871 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb; in sync_regs_fmt2()
4872 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb; in sync_regs_fmt2()
4873 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; in sync_regs_fmt2()
4874 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; in sync_regs_fmt2()
4875 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { in sync_regs_fmt2()
4876 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr; in sync_regs_fmt2()
4877 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp; in sync_regs_fmt2()
4878 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea; in sync_regs_fmt2()
4880 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) { in sync_regs_fmt2()
4881 vcpu->arch.pfault_token = kvm_run->s.regs.pft; in sync_regs_fmt2()
4882 vcpu->arch.pfault_select = kvm_run->s.regs.pfs; in sync_regs_fmt2()
4883 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc; in sync_regs_fmt2()
4884 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in sync_regs_fmt2()
4887 if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) { in sync_regs_fmt2()
4888 vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318; in sync_regs_fmt2()
4889 vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc; in sync_regs_fmt2()
4890 VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc); in sync_regs_fmt2()
4896 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) && in sync_regs_fmt2()
4897 test_kvm_facility(vcpu->kvm, 64) && in sync_regs_fmt2()
4898 riccb->v && in sync_regs_fmt2()
4899 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) { in sync_regs_fmt2()
4901 vcpu->arch.sie_block->ecb3 |= ECB3_RI; in sync_regs_fmt2()
4904 * If userspace sets the gscb (e.g. after migration) to non-zero, in sync_regs_fmt2()
4907 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) && in sync_regs_fmt2()
4908 test_kvm_facility(vcpu->kvm, 133) && in sync_regs_fmt2()
4909 gscb->gssm && in sync_regs_fmt2()
4910 !vcpu->arch.gs_enabled) { in sync_regs_fmt2()
4912 vcpu->arch.sie_block->ecb |= ECB_GS; in sync_regs_fmt2()
4913 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; in sync_regs_fmt2()
4914 vcpu->arch.gs_enabled = 1; in sync_regs_fmt2()
4916 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) && in sync_regs_fmt2()
4917 test_kvm_facility(vcpu->kvm, 82)) { in sync_regs_fmt2()
4918 vcpu->arch.sie_block->fpf &= ~FPF_BPBC; in sync_regs_fmt2()
4919 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0; in sync_regs_fmt2()
4924 if (current->thread.gs_cb) { in sync_regs_fmt2()
4925 vcpu->arch.host_gscb = current->thread.gs_cb; in sync_regs_fmt2()
4926 save_gs_cb(vcpu->arch.host_gscb); in sync_regs_fmt2()
4928 if (vcpu->arch.gs_enabled) { in sync_regs_fmt2()
4929 current->thread.gs_cb = (struct gs_cb *) in sync_regs_fmt2()
4930 &vcpu->run->s.regs.gscb; in sync_regs_fmt2()
4931 restore_gs_cb(current->thread.gs_cb); in sync_regs_fmt2()
4940 struct kvm_run *kvm_run = vcpu->run; in sync_regs()
4942 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) in sync_regs()
4943 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); in sync_regs()
4944 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) { in sync_regs()
4945 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); in sync_regs()
4949 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { in sync_regs()
4950 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm); in sync_regs()
4951 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc; in sync_regs()
4953 save_access_regs(vcpu->arch.host_acrs); in sync_regs()
4954 restore_access_regs(vcpu->run->s.regs.acrs); in sync_regs()
4957 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc; in sync_regs()
4958 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs; in sync_regs()
4960 current->thread.fpu.regs = vcpu->run->s.regs.vrs; in sync_regs()
4962 current->thread.fpu.regs = vcpu->run->s.regs.fprs; in sync_regs()
4963 current->thread.fpu.fpc = vcpu->run->s.regs.fpc; in sync_regs()
4964 if (test_fp_ctl(current->thread.fpu.fpc)) in sync_regs()
4966 current->thread.fpu.fpc = 0; in sync_regs()
4981 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC; in sync_regs()
4982 vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask & in sync_regs()
4986 kvm_run->kvm_dirty_regs = 0; in sync_regs()
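/*
 * Annotation (not part of the original source): sync_regs() is the kernel
 * half of the register synchronization protocol.  Userspace edits fields in
 * the shared kvm_run->s.regs area, marks them in kvm_run->kvm_dirty_regs and
 * calls KVM_RUN; on exit the fields named in kvm_valid_regs are written back
 * by store_regs() below.  Illustrative userspace sketch only, assuming run
 * points at the mmap()ed kvm_run structure of the VCPU.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

static int set_guest_prefix(int vcpu_fd, struct kvm_run *run, uint32_t prefix)
{
	run->s.regs.prefix = prefix;		/* new prefix register value */
	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;	/* tell the kernel to pick it up */

	return ioctl(vcpu_fd, KVM_RUN, 0);	/* synced in sync_regs() above */
}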
4991 struct kvm_run *kvm_run = vcpu->run; in store_regs_fmt2()
4993 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr; in store_regs_fmt2()
4994 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp; in store_regs_fmt2()
4995 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; in store_regs_fmt2()
4996 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC; in store_regs_fmt2()
4997 kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val; in store_regs_fmt2()
5001 if (vcpu->arch.gs_enabled) in store_regs_fmt2()
5002 save_gs_cb(current->thread.gs_cb); in store_regs_fmt2()
5003 current->thread.gs_cb = vcpu->arch.host_gscb; in store_regs_fmt2()
5004 restore_gs_cb(vcpu->arch.host_gscb); in store_regs_fmt2()
5005 if (!vcpu->arch.host_gscb) in store_regs_fmt2()
5007 vcpu->arch.host_gscb = NULL; in store_regs_fmt2()
5015 struct kvm_run *kvm_run = vcpu->run; in store_regs()
5017 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; in store_regs()
5018 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; in store_regs()
5019 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu); in store_regs()
5020 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); in store_regs()
5021 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu); in store_regs()
5022 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc; in store_regs()
5023 kvm_run->s.regs.pft = vcpu->arch.pfault_token; in store_regs()
5024 kvm_run->s.regs.pfs = vcpu->arch.pfault_select; in store_regs()
5025 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; in store_regs()
5026 save_access_regs(vcpu->run->s.regs.acrs); in store_regs()
5027 restore_access_regs(vcpu->arch.host_acrs); in store_regs()
5030 vcpu->run->s.regs.fpc = current->thread.fpu.fpc; in store_regs()
5032 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc; in store_regs()
5033 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs; in store_regs()
5040 struct kvm_run *kvm_run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
5049 if (vcpu->kvm->arch.pv.dumping) in kvm_arch_vcpu_ioctl_run()
5050 return -EINVAL; in kvm_arch_vcpu_ioctl_run()
5052 if (kvm_run->immediate_exit) in kvm_arch_vcpu_ioctl_run()
5053 return -EINTR; in kvm_arch_vcpu_ioctl_run()
5055 if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS || in kvm_arch_vcpu_ioctl_run()
5056 kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS) in kvm_arch_vcpu_ioctl_run()
5057 return -EINVAL; in kvm_arch_vcpu_ioctl_run()
5073 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { in kvm_arch_vcpu_ioctl_run()
5077 vcpu->vcpu_id); in kvm_arch_vcpu_ioctl_run()
5078 rc = -EINVAL; in kvm_arch_vcpu_ioctl_run()
5089 kvm_run->exit_reason = KVM_EXIT_INTR; in kvm_arch_vcpu_ioctl_run()
5090 rc = -EINTR; in kvm_arch_vcpu_ioctl_run()
5098 if (rc == -EREMOTE) { in kvm_arch_vcpu_ioctl_run()
5108 vcpu->stat.exit_userspace++; in kvm_arch_vcpu_ioctl_run()
5117 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
5118 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
5131 return -EFAULT; in kvm_s390_store_status_unloaded()
5135 return -EFAULT; in kvm_s390_store_status_unloaded()
5138 gpa -= __LC_FPREGS_SAVE_AREA; in kvm_s390_store_status_unloaded()
5142 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs); in kvm_s390_store_status_unloaded()
5147 vcpu->run->s.regs.fprs, 128); in kvm_s390_store_status_unloaded()
5150 vcpu->run->s.regs.gprs, 128); in kvm_s390_store_status_unloaded()
5152 &vcpu->arch.sie_block->gpsw, 16); in kvm_s390_store_status_unloaded()
5156 &vcpu->run->s.regs.fpc, 4); in kvm_s390_store_status_unloaded()
5158 &vcpu->arch.sie_block->todpr, 4); in kvm_s390_store_status_unloaded()
5162 clkcomp = vcpu->arch.sie_block->ckc >> 8; in kvm_s390_store_status_unloaded()
5166 &vcpu->run->s.regs.acrs, 64); in kvm_s390_store_status_unloaded()
5168 &vcpu->arch.sie_block->gcr, 128); in kvm_s390_store_status_unloaded()
5169 return rc ? -EFAULT : 0; in kvm_s390_store_status_unloaded()
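/*
 * Annotation (not part of the original source): this writes the architected
 * store-status save area for the VCPU - floating point / vector state, GPRs,
 * PSW, floating point control, TOD programmable register, CPU timer, clock
 * comparator, access registers and control registers - to the requested
 * absolute address: 0x1200 for KVM_S390_STORE_STATUS_NOADDR or the prefix
 * area for KVM_S390_STORE_STATUS_PREFIXED, as noted in the comment above.
 */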
5180 vcpu->run->s.regs.fpc = current->thread.fpu.fpc; in kvm_s390_vcpu_store_status()
5181 save_access_regs(vcpu->run->s.regs.acrs); in kvm_s390_vcpu_store_status()
5217 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); in kvm_s390_vcpu_start()
5219 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
5220 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_start()
5226 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
5232 if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i))) in kvm_s390_vcpu_start()
5237 /* we're the only active VCPU -> speed it up */ in kvm_s390_vcpu_start()
5245 __disable_ibs_on_all_vcpus(vcpu->kvm); in kvm_s390_vcpu_start()
5255 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK; in kvm_s390_vcpu_start()
5261 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
5273 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); in kvm_s390_vcpu_stop()
5275 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
5276 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_stop()
5282 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
5299 struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i); in kvm_s390_vcpu_stop()
5315 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
5324 if (cap->flags) in kvm_vcpu_ioctl_enable_cap()
5325 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
5327 switch (cap->cap) { in kvm_vcpu_ioctl_enable_cap()
5329 if (!vcpu->kvm->arch.css_support) { in kvm_vcpu_ioctl_enable_cap()
5330 vcpu->kvm->arch.css_support = 1; in kvm_vcpu_ioctl_enable_cap()
5331 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support"); in kvm_vcpu_ioctl_enable_cap()
5332 trace_kvm_s390_enable_css(vcpu->kvm); in kvm_vcpu_ioctl_enable_cap()
5337 r = -EINVAL; in kvm_vcpu_ioctl_enable_cap()
5346 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_vcpu_sida_op()
5350 if (mop->flags || !mop->size) in kvm_s390_vcpu_sida_op()
5351 return -EINVAL; in kvm_s390_vcpu_sida_op()
5352 if (mop->size + mop->sida_offset < mop->size) in kvm_s390_vcpu_sida_op()
5353 return -EINVAL; in kvm_s390_vcpu_sida_op()
5354 if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block)) in kvm_s390_vcpu_sida_op()
5355 return -E2BIG; in kvm_s390_vcpu_sida_op()
5357 return -EINVAL; in kvm_s390_vcpu_sida_op()
5359 sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset; in kvm_s390_vcpu_sida_op()
5361 switch (mop->op) { in kvm_s390_vcpu_sida_op()
5363 if (copy_to_user(uaddr, sida_addr, mop->size)) in kvm_s390_vcpu_sida_op()
5364 r = -EFAULT; in kvm_s390_vcpu_sida_op()
5368 if (copy_from_user(sida_addr, uaddr, mop->size)) in kvm_s390_vcpu_sida_op()
5369 r = -EFAULT; in kvm_s390_vcpu_sida_op()
5378 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_vcpu_mem_op()
5388 if (mop->ar >= NUM_ACRS) in kvm_s390_vcpu_mem_op()
5389 return -EINVAL; in kvm_s390_vcpu_mem_op()
5391 return -EINVAL; in kvm_s390_vcpu_mem_op()
5392 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) { in kvm_s390_vcpu_mem_op()
5393 tmpbuf = vmalloc(mop->size); in kvm_s390_vcpu_mem_op()
5395 return -ENOMEM; in kvm_s390_vcpu_mem_op()
5398 acc_mode = mop->op == KVM_S390_MEMOP_LOGICAL_READ ? GACC_FETCH : GACC_STORE; in kvm_s390_vcpu_mem_op()
5399 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { in kvm_s390_vcpu_mem_op()
5400 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, in kvm_s390_vcpu_mem_op()
5401 acc_mode, mop->key); in kvm_s390_vcpu_mem_op()
5405 r = read_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf, in kvm_s390_vcpu_mem_op()
5406 mop->size, mop->key); in kvm_s390_vcpu_mem_op()
5409 if (copy_to_user(uaddr, tmpbuf, mop->size)) { in kvm_s390_vcpu_mem_op()
5410 r = -EFAULT; in kvm_s390_vcpu_mem_op()
5414 if (copy_from_user(tmpbuf, uaddr, mop->size)) { in kvm_s390_vcpu_mem_op()
5415 r = -EFAULT; in kvm_s390_vcpu_mem_op()
5418 r = write_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf, in kvm_s390_vcpu_mem_op()
5419 mop->size, mop->key); in kvm_s390_vcpu_mem_op()
5423 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0) in kvm_s390_vcpu_mem_op()
5424 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); in kvm_s390_vcpu_mem_op()
5436 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_s390_vcpu_memsida_op()
5438 switch (mop->op) { in kvm_s390_vcpu_memsida_op()
5445 /* we are locked against sida going away by the vcpu->mutex */ in kvm_s390_vcpu_memsida_op()
5449 r = -EINVAL; in kvm_s390_vcpu_memsida_op()
5452 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvm_s390_vcpu_memsida_op()
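/*
 * Annotation (not part of the original source): illustrative userspace
 * sketch of a guest-logical read through the KVM_S390_MEM_OP path above.
 * struct kvm_s390_mem_op, KVM_S390_MEMOP_LOGICAL_READ and KVM_S390_MEM_OP
 * come from the kernel's uapi headers; the access register and storage key
 * fields are left at 0 here.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

static int read_guest_logical(int vcpu_fd, uint64_t gaddr, void *buf, uint32_t len)
{
	struct kvm_s390_mem_op mop = {
		.gaddr = gaddr,				/* guest logical address */
		.buf   = (uint64_t)(unsigned long)buf,	/* userspace destination */
		.size  = len,
		.op    = KVM_S390_MEMOP_LOGICAL_READ,
	};

	/* 0 on success; see kvm_s390_vcpu_mem_op() above for error handling */
	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &mop);
}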
5459 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl()
5468 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
5477 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
5479 return -EINVAL; in kvm_arch_vcpu_async_ioctl()
5484 rc = -ENOIOCTLCMD; in kvm_arch_vcpu_async_ioctl()
5489 * To simplify single stepping of userspace-emulated instructions, in kvm_arch_vcpu_async_ioctl()
5496 vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING; in kvm_arch_vcpu_async_ioctl()
5509 if (!vcpu->kvm->arch.pv.dumping) in kvm_s390_handle_pv_vcpu_dump()
5510 return -EINVAL; in kvm_s390_handle_pv_vcpu_dump()
5512 if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp))) in kvm_s390_handle_pv_vcpu_dump()
5513 return -EFAULT; in kvm_s390_handle_pv_vcpu_dump()
5517 return -EINVAL; in kvm_s390_handle_pv_vcpu_dump()
5521 return -EINVAL; in kvm_s390_handle_pv_vcpu_dump()
5525 return -ENOMEM; in kvm_s390_handle_pv_vcpu_dump()
5527 ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv_vcpu_dump()
5530 vcpu->vcpu_id, cmd->rc, cmd->rrc); in kvm_s390_handle_pv_vcpu_dump()
5533 ret = -EINVAL; in kvm_s390_handle_pv_vcpu_dump()
5537 ret = -EFAULT; in kvm_s390_handle_pv_vcpu_dump()
5546 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
5556 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5558 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5563 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5603 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5606 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5620 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5624 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
5625 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5629 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, in kvm_arch_vcpu_ioctl()
5637 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5641 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
5642 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5646 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, in kvm_arch_vcpu_ioctl()
5652 r = gmap_fault(vcpu->arch.gmap, arg, 0); in kvm_arch_vcpu_ioctl()
5658 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5670 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5676 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5682 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5694 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5698 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5710 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5714 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5718 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5731 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5735 r = -ENOTTY; in kvm_arch_vcpu_ioctl()
5745 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET) in kvm_arch_vcpu_fault()
5746 && (kvm_is_ucontrol(vcpu->kvm))) { in kvm_arch_vcpu_fault()
5747 vmf->page = virt_to_page(vcpu->arch.sie_block); in kvm_arch_vcpu_fault()
5748 get_page(vmf->page); in kvm_arch_vcpu_fault()
5770 return -EINVAL; in kvm_arch_prepare_memory_region()
5780 if (new->userspace_addr & 0xffffful) in kvm_arch_prepare_memory_region()
5781 return -EINVAL; in kvm_arch_prepare_memory_region()
5783 size = new->npages * PAGE_SIZE; in kvm_arch_prepare_memory_region()
5785 return -EINVAL; in kvm_arch_prepare_memory_region()
5787 if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit) in kvm_arch_prepare_memory_region()
5788 return -EINVAL; in kvm_arch_prepare_memory_region()
5791 if (!kvm->arch.migration_mode) in kvm_arch_prepare_memory_region()
5796 * - userspace creates a new memslot with dirty logging off, in kvm_arch_prepare_memory_region()
5797 * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and in kvm_arch_prepare_memory_region()
5803 !(new->flags & KVM_MEM_LOG_DIRTY_PAGES)) in kvm_arch_prepare_memory_region()
5819 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
5820 old->npages * PAGE_SIZE); in kvm_arch_commit_memory_region()
5823 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
5824 old->npages * PAGE_SIZE); in kvm_arch_commit_memory_region()
5829 rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr, in kvm_arch_commit_memory_region()
5830 new->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
5831 new->npages * PAGE_SIZE); in kvm_arch_commit_memory_region()
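/*
 * Annotation (not part of the original source): s390 memory slots must be
 * backed by userspace memory starting on a 1 MB boundary (the
 * userspace_addr & 0xffffful check above) and must fit below the configured
 * guest memory limit.  On commit, the affected range is unregistered from
 * and/or registered with the gmap (guest address space); actual page tables
 * are still built lazily on guest faults.
 */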
5856 return -ENODEV; in kvm_s390_init()
5861 return -EINVAL; in kvm_s390_init()