Lines Matching +full:ipa +full:- +full:clock +full:- +full:query

1 // SPDX-License-Identifier: GPL-2.0
32 #include <asm/asm-offsets.h>
43 #include "kvm-s390.h"
46 #define KMSG_COMPONENT "kvm-s390"
52 #include "trace-s390.h"
189 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
210 /* available subfunctions indicated via query / "test bit" */
220 /* every s390 is virtualization enabled ;-) */ in kvm_arch_hardware_enable()
233 * -delta to the epoch. in kvm_clock_sync_scb()
235 delta = -delta; in kvm_clock_sync_scb()
237 /* sign-extension - we're adding to signed values below */ in kvm_clock_sync_scb()
239 delta_idx = -1; in kvm_clock_sync_scb()
241 scb->epoch += delta; in kvm_clock_sync_scb()
242 if (scb->ecd & ECD_MEF) { in kvm_clock_sync_scb()
243 scb->epdx += delta_idx; in kvm_clock_sync_scb()
244 if (scb->epoch < delta) in kvm_clock_sync_scb()
245 scb->epdx += 1; in kvm_clock_sync_scb()
265 kvm_clock_sync_scb(vcpu->arch.sie_block, *delta); in kvm_clock_sync()
267 kvm->arch.epoch = vcpu->arch.sie_block->epoch; in kvm_clock_sync()
268 kvm->arch.epdx = vcpu->arch.sie_block->epdx; in kvm_clock_sync()
270 if (vcpu->arch.cputm_enabled) in kvm_clock_sync()
271 vcpu->arch.cputm_start += *delta; in kvm_clock_sync()
272 if (vcpu->arch.vsie_block) in kvm_clock_sync()
273 kvm_clock_sync_scb(vcpu->arch.vsie_block, in kvm_clock_sync()
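A note on the kvm_clock_sync_scb() hits above (lines 233-245): a host TOD clock delta is folded into the guest's multi-epoch time by sign-extending the 64-bit delta into an epoch-index delta, adding both parts, and carrying into the index when the low 64-bit addition wraps. A minimal standalone sketch of that carry logic, using simplified, hypothetical types rather than the kernel's sie_block fields:

	#include <stdint.h>

	/* Illustrative only: epoch128/epoch_add_delta are made-up names standing in
	 * for scb->epdx (epoch index) and scb->epoch from the listing above. */
	struct epoch128 {
		uint8_t  idx;
		uint64_t epoch;
	};

	void epoch_add_delta(struct epoch128 *e, int64_t delta)
	{
		/* sign-extension - we're adding to signed values below */
		uint8_t delta_idx = delta < 0 ? 0xff : 0x00;

		e->epoch += (uint64_t)delta;
		e->idx   += delta_idx;
		/* carry into the index when the 64-bit addition wrapped around */
		if (e->epoch < (uint64_t)delta)
			e->idx += 1;
	}
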
333 if (test_facility(28)) /* TOD-clock steering */ in kvm_s390_cpu_feat_init()
421 kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long)); in kvm_arch_init()
423 return -ENOMEM; in kvm_arch_init()
426 rc = -ENOMEM; in kvm_arch_init()
456 return -EINVAL; in kvm_arch_dev_ioctl()
540 struct gmap *gmap = kvm->arch.gmap; in kvm_s390_sync_dirty_log()
544 cur_gfn = memslot->base_gfn; in kvm_s390_sync_dirty_log()
545 last_gfn = memslot->base_gfn + memslot->npages; in kvm_s390_sync_dirty_log()
581 return -EINVAL; in kvm_vm_ioctl_get_dirty_log()
583 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
585 r = -EINVAL; in kvm_vm_ioctl_get_dirty_log()
586 if (log->slot >= KVM_USER_MEM_SLOTS) in kvm_vm_ioctl_get_dirty_log()
590 memslot = id_to_memslot(slots, log->slot); in kvm_vm_ioctl_get_dirty_log()
591 r = -ENOENT; in kvm_vm_ioctl_get_dirty_log()
592 if (!memslot->dirty_bitmap) in kvm_vm_ioctl_get_dirty_log()
603 memset(memslot->dirty_bitmap, 0, n); in kvm_vm_ioctl_get_dirty_log()
607 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
625 if (cap->flags) in kvm_vm_ioctl_enable_cap()
626 return -EINVAL; in kvm_vm_ioctl_enable_cap()
628 switch (cap->cap) { in kvm_vm_ioctl_enable_cap()
631 kvm->arch.use_irqchip = 1; in kvm_vm_ioctl_enable_cap()
636 kvm->arch.user_sigp = 1; in kvm_vm_ioctl_enable_cap()
640 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
641 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
642 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
644 set_kvm_facility(kvm->arch.model.fac_mask, 129); in kvm_vm_ioctl_enable_cap()
645 set_kvm_facility(kvm->arch.model.fac_list, 129); in kvm_vm_ioctl_enable_cap()
647 set_kvm_facility(kvm->arch.model.fac_mask, 134); in kvm_vm_ioctl_enable_cap()
648 set_kvm_facility(kvm->arch.model.fac_list, 134); in kvm_vm_ioctl_enable_cap()
651 set_kvm_facility(kvm->arch.model.fac_mask, 135); in kvm_vm_ioctl_enable_cap()
652 set_kvm_facility(kvm->arch.model.fac_list, 135); in kvm_vm_ioctl_enable_cap()
656 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
657 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
662 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
663 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
664 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
665 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
667 set_kvm_facility(kvm->arch.model.fac_mask, 64); in kvm_vm_ioctl_enable_cap()
668 set_kvm_facility(kvm->arch.model.fac_list, 64); in kvm_vm_ioctl_enable_cap()
671 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
676 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
677 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
678 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
680 set_kvm_facility(kvm->arch.model.fac_mask, 72); in kvm_vm_ioctl_enable_cap()
681 set_kvm_facility(kvm->arch.model.fac_list, 72); in kvm_vm_ioctl_enable_cap()
684 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
689 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
690 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
691 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
692 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
694 set_kvm_facility(kvm->arch.model.fac_mask, 133); in kvm_vm_ioctl_enable_cap()
695 set_kvm_facility(kvm->arch.model.fac_list, 133); in kvm_vm_ioctl_enable_cap()
698 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
703 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
704 if (kvm->created_vcpus) in kvm_vm_ioctl_enable_cap()
705 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
706 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_enable_cap()
707 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
710 down_write(&kvm->mm->mmap_sem); in kvm_vm_ioctl_enable_cap()
711 kvm->mm->context.allow_gmap_hpage_1m = 1; in kvm_vm_ioctl_enable_cap()
712 up_write(&kvm->mm->mmap_sem); in kvm_vm_ioctl_enable_cap()
718 kvm->arch.use_skf = 0; in kvm_vm_ioctl_enable_cap()
719 kvm->arch.use_pfmfi = 0; in kvm_vm_ioctl_enable_cap()
721 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
727 kvm->arch.user_stsi = 1; in kvm_vm_ioctl_enable_cap()
732 kvm->arch.user_instr0 = 1; in kvm_vm_ioctl_enable_cap()
737 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
747 switch (attr->attr) { in kvm_s390_get_mem_control()
750 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes", in kvm_s390_get_mem_control()
751 kvm->arch.mem_limit); in kvm_s390_get_mem_control()
752 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr)) in kvm_s390_get_mem_control()
753 ret = -EFAULT; in kvm_s390_get_mem_control()
756 ret = -ENXIO; in kvm_s390_get_mem_control()
766 switch (attr->attr) { in kvm_s390_set_mem_control()
768 ret = -ENXIO; in kvm_s390_set_mem_control()
773 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
774 if (kvm->created_vcpus) in kvm_s390_set_mem_control()
775 ret = -EBUSY; in kvm_s390_set_mem_control()
776 else if (kvm->mm->context.allow_gmap_hpage_1m) in kvm_s390_set_mem_control()
777 ret = -EINVAL; in kvm_s390_set_mem_control()
779 kvm->arch.use_cmma = 1; in kvm_s390_set_mem_control()
781 kvm->arch.use_pfmfi = 0; in kvm_s390_set_mem_control()
784 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
787 ret = -ENXIO; in kvm_s390_set_mem_control()
790 ret = -EINVAL; in kvm_s390_set_mem_control()
791 if (!kvm->arch.use_cmma) in kvm_s390_set_mem_control()
795 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
796 idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_mem_control()
797 s390_reset_cmma(kvm->arch.gmap->mm); in kvm_s390_set_mem_control()
798 srcu_read_unlock(&kvm->srcu, idx); in kvm_s390_set_mem_control()
799 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
806 return -EINVAL; in kvm_s390_set_mem_control()
808 if (get_user(new_limit, (u64 __user *)attr->addr)) in kvm_s390_set_mem_control()
809 return -EFAULT; in kvm_s390_set_mem_control()
811 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT && in kvm_s390_set_mem_control()
812 new_limit > kvm->arch.mem_limit) in kvm_s390_set_mem_control()
813 return -E2BIG; in kvm_s390_set_mem_control()
816 return -EINVAL; in kvm_s390_set_mem_control()
820 new_limit -= 1; in kvm_s390_set_mem_control()
822 ret = -EBUSY; in kvm_s390_set_mem_control()
823 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
824 if (!kvm->created_vcpus) { in kvm_s390_set_mem_control()
826 struct gmap *new = gmap_create(current->mm, new_limit); in kvm_s390_set_mem_control()
829 ret = -ENOMEM; in kvm_s390_set_mem_control()
831 gmap_remove(kvm->arch.gmap); in kvm_s390_set_mem_control()
832 new->private = kvm; in kvm_s390_set_mem_control()
833 kvm->arch.gmap = new; in kvm_s390_set_mem_control()
837 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
840 (void *) kvm->arch.gmap->asce); in kvm_s390_set_mem_control()
844 ret = -ENXIO; in kvm_s390_set_mem_control()
868 return -EINVAL; in kvm_s390_vm_set_crypto()
870 mutex_lock(&kvm->lock); in kvm_s390_vm_set_crypto()
871 switch (attr->attr) { in kvm_s390_vm_set_crypto()
874 kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_vm_set_crypto()
875 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
876 kvm->arch.crypto.aes_kw = 1; in kvm_s390_vm_set_crypto()
881 kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_vm_set_crypto()
882 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
883 kvm->arch.crypto.dea_kw = 1; in kvm_s390_vm_set_crypto()
887 kvm->arch.crypto.aes_kw = 0; in kvm_s390_vm_set_crypto()
888 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
889 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
893 kvm->arch.crypto.dea_kw = 0; in kvm_s390_vm_set_crypto()
894 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
895 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
899 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
900 return -ENXIO; in kvm_s390_vm_set_crypto()
904 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
918 * Must be called with kvm->srcu held to avoid races on memslots, and with
919 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
929 if (kvm->arch.migration_mode) in kvm_s390_vm_start_migration()
932 if (!slots || !slots->used_slots) in kvm_s390_vm_start_migration()
933 return -EINVAL; in kvm_s390_vm_start_migration()
935 if (!kvm->arch.use_cmma) { in kvm_s390_vm_start_migration()
936 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
940 for (slotnr = 0; slotnr < slots->used_slots; slotnr++) { in kvm_s390_vm_start_migration()
941 ms = slots->memslots + slotnr; in kvm_s390_vm_start_migration()
942 if (!ms->dirty_bitmap) in kvm_s390_vm_start_migration()
943 return -EINVAL; in kvm_s390_vm_start_migration()
951 ram_pages += ms->npages; in kvm_s390_vm_start_migration()
953 atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages); in kvm_s390_vm_start_migration()
954 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
960 * Must be called with kvm->slots_lock to avoid races with ourselves and
966 if (!kvm->arch.migration_mode) in kvm_s390_vm_stop_migration()
968 kvm->arch.migration_mode = 0; in kvm_s390_vm_stop_migration()
969 if (kvm->arch.use_cmma) in kvm_s390_vm_stop_migration()
977 int res = -ENXIO; in kvm_s390_vm_set_migration()
979 mutex_lock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
980 switch (attr->attr) { in kvm_s390_vm_set_migration()
990 mutex_unlock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
998 u64 mig = kvm->arch.migration_mode; in kvm_s390_vm_get_migration()
1000 if (attr->attr != KVM_S390_VM_MIGRATION_STATUS) in kvm_s390_vm_get_migration()
1001 return -ENXIO; in kvm_s390_vm_get_migration()
1003 if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig))) in kvm_s390_vm_get_migration()
1004 return -EFAULT; in kvm_s390_vm_get_migration()
1012 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod))) in kvm_s390_set_tod_ext()
1013 return -EFAULT; in kvm_s390_set_tod_ext()
1016 return -EINVAL; in kvm_s390_set_tod_ext()
1029 if (copy_from_user(&gtod_high, (void __user *)attr->addr, in kvm_s390_set_tod_high()
1031 return -EFAULT; in kvm_s390_set_tod_high()
1034 return -EINVAL; in kvm_s390_set_tod_high()
1044 if (copy_from_user(&gtod.tod, (void __user *)attr->addr, in kvm_s390_set_tod_low()
1046 return -EFAULT; in kvm_s390_set_tod_low()
1057 if (attr->flags) in kvm_s390_set_tod()
1058 return -EINVAL; in kvm_s390_set_tod()
1060 switch (attr->attr) { in kvm_s390_set_tod()
1071 ret = -ENXIO; in kvm_s390_set_tod()
1086 gtod->tod = htod.tod + kvm->arch.epoch; in kvm_s390_get_tod_clock()
1087 gtod->epoch_idx = 0; in kvm_s390_get_tod_clock()
1089 gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx; in kvm_s390_get_tod_clock()
1090 if (gtod->tod < htod.tod) in kvm_s390_get_tod_clock()
1091 gtod->epoch_idx += 1; in kvm_s390_get_tod_clock()
1103 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) in kvm_s390_get_tod_ext()
1104 return -EFAULT; in kvm_s390_get_tod_ext()
1106 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx", in kvm_s390_get_tod_ext()
1115 if (copy_to_user((void __user *)attr->addr, &gtod_high, in kvm_s390_get_tod_high()
1117 return -EFAULT; in kvm_s390_get_tod_high()
1118 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high); in kvm_s390_get_tod_high()
1128 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) in kvm_s390_get_tod_low()
1129 return -EFAULT; in kvm_s390_get_tod_low()
1130 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod); in kvm_s390_get_tod_low()
1139 if (attr->flags) in kvm_s390_get_tod()
1140 return -EINVAL; in kvm_s390_get_tod()
1142 switch (attr->attr) { in kvm_s390_get_tod()
1153 ret = -ENXIO; in kvm_s390_get_tod()
1165 mutex_lock(&kvm->lock); in kvm_s390_set_processor()
1166 if (kvm->created_vcpus) { in kvm_s390_set_processor()
1167 ret = -EBUSY; in kvm_s390_set_processor()
1172 ret = -ENOMEM; in kvm_s390_set_processor()
1175 if (!copy_from_user(proc, (void __user *)attr->addr, in kvm_s390_set_processor()
1177 kvm->arch.model.cpuid = proc->cpuid; in kvm_s390_set_processor()
1180 if (lowest_ibc && proc->ibc) { in kvm_s390_set_processor()
1181 if (proc->ibc > unblocked_ibc) in kvm_s390_set_processor()
1182 kvm->arch.model.ibc = unblocked_ibc; in kvm_s390_set_processor()
1183 else if (proc->ibc < lowest_ibc) in kvm_s390_set_processor()
1184 kvm->arch.model.ibc = lowest_ibc; in kvm_s390_set_processor()
1186 kvm->arch.model.ibc = proc->ibc; in kvm_s390_set_processor()
1188 memcpy(kvm->arch.model.fac_list, proc->fac_list, in kvm_s390_set_processor()
1191 kvm->arch.model.ibc, in kvm_s390_set_processor()
1192 kvm->arch.model.cpuid); in kvm_s390_set_processor()
1194 kvm->arch.model.fac_list[0], in kvm_s390_set_processor()
1195 kvm->arch.model.fac_list[1], in kvm_s390_set_processor()
1196 kvm->arch.model.fac_list[2]); in kvm_s390_set_processor()
1198 ret = -EFAULT; in kvm_s390_set_processor()
1201 mutex_unlock(&kvm->lock); in kvm_s390_set_processor()
1210 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data))) in kvm_s390_set_processor_feat()
1211 return -EFAULT; in kvm_s390_set_processor_feat()
1215 return -EINVAL; in kvm_s390_set_processor_feat()
1217 mutex_lock(&kvm->lock); in kvm_s390_set_processor_feat()
1218 if (kvm->created_vcpus) { in kvm_s390_set_processor_feat()
1219 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1220 return -EBUSY; in kvm_s390_set_processor_feat()
1222 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat, in kvm_s390_set_processor_feat()
1224 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1237 * in kvm->arch and remember that user space configured them. in kvm_s390_set_processor_subfunc()
1239 return -ENXIO; in kvm_s390_set_processor_subfunc()
1244 int ret = -ENXIO; in kvm_s390_set_cpu_model()
1246 switch (attr->attr) { in kvm_s390_set_cpu_model()
1267 ret = -ENOMEM; in kvm_s390_get_processor()
1270 proc->cpuid = kvm->arch.model.cpuid; in kvm_s390_get_processor()
1271 proc->ibc = kvm->arch.model.ibc; in kvm_s390_get_processor()
1272 memcpy(&proc->fac_list, kvm->arch.model.fac_list, in kvm_s390_get_processor()
1275 kvm->arch.model.ibc, in kvm_s390_get_processor()
1276 kvm->arch.model.cpuid); in kvm_s390_get_processor()
1278 kvm->arch.model.fac_list[0], in kvm_s390_get_processor()
1279 kvm->arch.model.fac_list[1], in kvm_s390_get_processor()
1280 kvm->arch.model.fac_list[2]); in kvm_s390_get_processor()
1281 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc))) in kvm_s390_get_processor()
1282 ret = -EFAULT; in kvm_s390_get_processor()
1295 ret = -ENOMEM; in kvm_s390_get_machine()
1298 get_cpu_id((struct cpuid *) &mach->cpuid); in kvm_s390_get_machine()
1299 mach->ibc = sclp.ibc; in kvm_s390_get_machine()
1300 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask, in kvm_s390_get_machine()
1302 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list, in kvm_s390_get_machine()
1305 kvm->arch.model.ibc, in kvm_s390_get_machine()
1306 kvm->arch.model.cpuid); in kvm_s390_get_machine()
1308 mach->fac_mask[0], in kvm_s390_get_machine()
1309 mach->fac_mask[1], in kvm_s390_get_machine()
1310 mach->fac_mask[2]); in kvm_s390_get_machine()
1312 mach->fac_list[0], in kvm_s390_get_machine()
1313 mach->fac_list[1], in kvm_s390_get_machine()
1314 mach->fac_list[2]); in kvm_s390_get_machine()
1315 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) in kvm_s390_get_machine()
1316 ret = -EFAULT; in kvm_s390_get_machine()
1327 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat, in kvm_s390_get_processor_feat()
1329 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data))) in kvm_s390_get_processor_feat()
1330 return -EFAULT; in kvm_s390_get_processor_feat()
1346 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data))) in kvm_s390_get_machine_feat()
1347 return -EFAULT; in kvm_s390_get_machine_feat()
1361 * them from kvm->arch. in kvm_s390_get_processor_subfunc()
1363 return -ENXIO; in kvm_s390_get_processor_subfunc()
1369 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc, in kvm_s390_get_machine_subfunc()
1371 return -EFAULT; in kvm_s390_get_machine_subfunc()
1376 int ret = -ENXIO; in kvm_s390_get_cpu_model()
1378 switch (attr->attr) { in kvm_s390_get_cpu_model()
1405 switch (attr->group) { in kvm_s390_vm_set_attr()
1422 ret = -ENXIO; in kvm_s390_vm_set_attr()
1433 switch (attr->group) { in kvm_s390_vm_get_attr()
1447 ret = -ENXIO; in kvm_s390_vm_get_attr()
1458 switch (attr->group) { in kvm_s390_vm_has_attr()
1460 switch (attr->attr) { in kvm_s390_vm_has_attr()
1463 ret = sclp.has_cmma ? 0 : -ENXIO; in kvm_s390_vm_has_attr()
1469 ret = -ENXIO; in kvm_s390_vm_has_attr()
1474 switch (attr->attr) { in kvm_s390_vm_has_attr()
1480 ret = -ENXIO; in kvm_s390_vm_has_attr()
1485 switch (attr->attr) { in kvm_s390_vm_has_attr()
1496 ret = -ENXIO; in kvm_s390_vm_has_attr()
1501 switch (attr->attr) { in kvm_s390_vm_has_attr()
1509 ret = -ENXIO; in kvm_s390_vm_has_attr()
1517 ret = -ENXIO; in kvm_s390_vm_has_attr()
1530 if (args->flags != 0) in kvm_s390_get_skeys()
1531 return -EINVAL; in kvm_s390_get_skeys()
1534 if (!mm_uses_skeys(current->mm)) in kvm_s390_get_skeys()
1538 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) in kvm_s390_get_skeys()
1539 return -EINVAL; in kvm_s390_get_skeys()
1541 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL); in kvm_s390_get_skeys()
1543 return -ENOMEM; in kvm_s390_get_skeys()
1545 down_read(&current->mm->mmap_sem); in kvm_s390_get_skeys()
1546 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_skeys()
1547 for (i = 0; i < args->count; i++) { in kvm_s390_get_skeys()
1548 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_get_skeys()
1550 r = -EFAULT; in kvm_s390_get_skeys()
1554 r = get_guest_storage_key(current->mm, hva, &keys[i]); in kvm_s390_get_skeys()
1558 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_skeys()
1559 up_read(&current->mm->mmap_sem); in kvm_s390_get_skeys()
1562 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys, in kvm_s390_get_skeys()
1563 sizeof(uint8_t) * args->count); in kvm_s390_get_skeys()
1565 r = -EFAULT; in kvm_s390_get_skeys()
1579 if (args->flags != 0) in kvm_s390_set_skeys()
1580 return -EINVAL; in kvm_s390_set_skeys()
1583 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) in kvm_s390_set_skeys()
1584 return -EINVAL; in kvm_s390_set_skeys()
1586 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL); in kvm_s390_set_skeys()
1588 return -ENOMEM; in kvm_s390_set_skeys()
1590 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr, in kvm_s390_set_skeys()
1591 sizeof(uint8_t) * args->count); in kvm_s390_set_skeys()
1593 r = -EFAULT; in kvm_s390_set_skeys()
1603 down_read(&current->mm->mmap_sem); in kvm_s390_set_skeys()
1604 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_skeys()
1605 while (i < args->count) { in kvm_s390_set_skeys()
1607 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_skeys()
1609 r = -EFAULT; in kvm_s390_set_skeys()
1615 r = -EINVAL; in kvm_s390_set_skeys()
1619 r = set_guest_storage_key(current->mm, hva, keys[i], 0); in kvm_s390_set_skeys()
1621 r = fixup_user_fault(current, current->mm, hva, in kvm_s390_set_skeys()
1629 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_skeys()
1630 up_read(&current->mm->mmap_sem); in kvm_s390_set_skeys()
1652 int start = 0, end = slots->used_slots; in gfn_to_memslot_approx()
1653 int slot = atomic_read(&slots->lru_slot); in gfn_to_memslot_approx()
1654 struct kvm_memory_slot *memslots = slots->memslots; in gfn_to_memslot_approx()
1661 slot = start + (end - start) / 2; in gfn_to_memslot_approx()
1669 if (start >= slots->used_slots) in gfn_to_memslot_approx()
1670 return slots->used_slots - 1; in gfn_to_memslot_approx()
1674 atomic_set(&slots->lru_slot, start); in gfn_to_memslot_approx()
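On the gfn_to_memslot_approx() hits above (lines 1652-1674): memslots are kept sorted by descending base_gfn, and this helper does a binary search that, unlike the exact lookup, still returns the nearest slot when the gfn is not backed by any slot; the lru_slot cache (line 1674) is refreshed when the gfn actually falls inside the slot that was found. A hedged, self-contained sketch of the search itself (slot/slot_search_approx are illustrative names, not the kernel's memslot structures):

	#include <stddef.h>
	#include <stdint.h>

	struct slot { uint64_t base_gfn; uint64_t npages; };

	/* slots[] is sorted by descending base_gfn; return the index of the first
	 * slot whose base_gfn is <= gfn, or the last slot if there is none. */
	size_t slot_search_approx(const struct slot *slots, size_t used, uint64_t gfn)
	{
		size_t start = 0, end = used;

		while (start < end) {
			size_t mid = start + (end - start) / 2;

			if (gfn >= slots[mid].base_gfn)
				end = mid;        /* candidate: keep looking further left */
			else
				start = mid + 1;  /* gfn lies below this slot entirely */
		}
		return start < used ? start : used - 1;
	}
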
1683 unsigned long pgstev, hva, cur_gfn = args->start_gfn; in kvm_s390_peek_cmma()
1685 args->count = 0; in kvm_s390_peek_cmma()
1686 while (args->count < bufsize) { in kvm_s390_peek_cmma()
1693 return args->count ? 0 : -EFAULT; in kvm_s390_peek_cmma()
1694 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_peek_cmma()
1696 res[args->count++] = (pgstev >> 24) & 0x43; in kvm_s390_peek_cmma()
1707 struct kvm_memory_slot *ms = slots->memslots + slotidx; in kvm_s390_next_dirty_cmma()
1708 unsigned long ofs = cur_gfn - ms->base_gfn; in kvm_s390_next_dirty_cmma()
1710 if (ms->base_gfn + ms->npages <= cur_gfn) { in kvm_s390_next_dirty_cmma()
1711 slotidx--; in kvm_s390_next_dirty_cmma()
1714 slotidx = slots->used_slots - 1; in kvm_s390_next_dirty_cmma()
1716 ms = slots->memslots + slotidx; in kvm_s390_next_dirty_cmma()
1719 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs); in kvm_s390_next_dirty_cmma()
1720 while ((slotidx > 0) && (ofs >= ms->npages)) { in kvm_s390_next_dirty_cmma()
1721 slotidx--; in kvm_s390_next_dirty_cmma()
1722 ms = slots->memslots + slotidx; in kvm_s390_next_dirty_cmma()
1723 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0); in kvm_s390_next_dirty_cmma()
1725 return ms->base_gfn + ofs; in kvm_s390_next_dirty_cmma()
1735 cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn); in kvm_s390_get_cmma()
1737 args->count = 0; in kvm_s390_get_cmma()
1738 args->start_gfn = cur_gfn; in kvm_s390_get_cmma()
1742 mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages; in kvm_s390_get_cmma()
1744 while (args->count < bufsize) { in kvm_s390_get_cmma()
1749 if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms))) in kvm_s390_get_cmma()
1750 atomic64_dec(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma()
1751 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_get_cmma()
1754 res[args->count++] = (pgstev >> 24) & 0x43; in kvm_s390_get_cmma()
1763 (next_gfn - args->start_gfn >= bufsize)) in kvm_s390_get_cmma()
1767 if (cur_gfn - ms->base_gfn >= ms->npages) { in kvm_s390_get_cmma()
1791 if (!kvm->arch.use_cmma) in kvm_s390_get_cmma_bits()
1792 return -ENXIO; in kvm_s390_get_cmma_bits()
1794 if (args->flags & ~KVM_S390_CMMA_PEEK) in kvm_s390_get_cmma_bits()
1795 return -EINVAL; in kvm_s390_get_cmma_bits()
1796 /* Migration mode query, and we are not doing a migration */ in kvm_s390_get_cmma_bits()
1797 peek = !!(args->flags & KVM_S390_CMMA_PEEK); in kvm_s390_get_cmma_bits()
1798 if (!peek && !kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
1799 return -EINVAL; in kvm_s390_get_cmma_bits()
1801 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX); in kvm_s390_get_cmma_bits()
1802 if (!bufsize || !kvm->mm->context.uses_cmm) { in kvm_s390_get_cmma_bits()
1807 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) { in kvm_s390_get_cmma_bits()
1814 return -ENOMEM; in kvm_s390_get_cmma_bits()
1816 down_read(&kvm->mm->mmap_sem); in kvm_s390_get_cmma_bits()
1817 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_cmma_bits()
1822 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_cmma_bits()
1823 up_read(&kvm->mm->mmap_sem); in kvm_s390_get_cmma_bits()
1825 if (kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
1826 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma_bits()
1828 args->remaining = 0; in kvm_s390_get_cmma_bits()
1830 if (copy_to_user((void __user *)args->values, values, args->count)) in kvm_s390_get_cmma_bits()
1831 ret = -EFAULT; in kvm_s390_get_cmma_bits()
1840 * set and the mm->context.uses_cmm flag is set.
1849 mask = args->mask; in kvm_s390_set_cmma_bits()
1851 if (!kvm->arch.use_cmma) in kvm_s390_set_cmma_bits()
1852 return -ENXIO; in kvm_s390_set_cmma_bits()
1854 if (args->flags != 0) in kvm_s390_set_cmma_bits()
1855 return -EINVAL; in kvm_s390_set_cmma_bits()
1857 if (args->count > KVM_S390_CMMA_SIZE_MAX) in kvm_s390_set_cmma_bits()
1858 return -EINVAL; in kvm_s390_set_cmma_bits()
1860 if (args->count == 0) in kvm_s390_set_cmma_bits()
1863 bits = vmalloc(array_size(sizeof(*bits), args->count)); in kvm_s390_set_cmma_bits()
1865 return -ENOMEM; in kvm_s390_set_cmma_bits()
1867 r = copy_from_user(bits, (void __user *)args->values, args->count); in kvm_s390_set_cmma_bits()
1869 r = -EFAULT; in kvm_s390_set_cmma_bits()
1873 down_read(&kvm->mm->mmap_sem); in kvm_s390_set_cmma_bits()
1874 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_cmma_bits()
1875 for (i = 0; i < args->count; i++) { in kvm_s390_set_cmma_bits()
1876 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_cmma_bits()
1878 r = -EFAULT; in kvm_s390_set_cmma_bits()
1885 set_pgste_bits(kvm->mm, hva, mask, pgstev); in kvm_s390_set_cmma_bits()
1887 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_cmma_bits()
1888 up_read(&kvm->mm->mmap_sem); in kvm_s390_set_cmma_bits()
1890 if (!kvm->mm->context.uses_cmm) { in kvm_s390_set_cmma_bits()
1891 down_write(&kvm->mm->mmap_sem); in kvm_s390_set_cmma_bits()
1892 kvm->mm->context.uses_cmm = 1; in kvm_s390_set_cmma_bits()
1893 up_write(&kvm->mm->mmap_sem); in kvm_s390_set_cmma_bits()
1903 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
1912 r = -EFAULT; in kvm_arch_vm_ioctl()
1920 r = -EFAULT; in kvm_arch_vm_ioctl()
1929 r = -EINVAL; in kvm_arch_vm_ioctl()
1930 if (kvm->arch.use_irqchip) { in kvm_arch_vm_ioctl()
1938 r = -EFAULT; in kvm_arch_vm_ioctl()
1945 r = -EFAULT; in kvm_arch_vm_ioctl()
1952 r = -EFAULT; in kvm_arch_vm_ioctl()
1961 r = -EFAULT; in kvm_arch_vm_ioctl()
1971 r = -EFAULT; in kvm_arch_vm_ioctl()
1981 r = -EFAULT; in kvm_arch_vm_ioctl()
1984 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
1986 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
1990 r = -EFAULT; in kvm_arch_vm_ioctl()
1997 r = -EFAULT; in kvm_arch_vm_ioctl()
2000 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
2002 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
2006 r = -ENOTTY; in kvm_arch_vm_ioctl()
2053 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb; in kvm_s390_set_crycb_format()
2056 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; in kvm_s390_set_crycb_format()
2058 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; in kvm_s390_set_crycb_format()
2075 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb; in kvm_s390_crypto_init()
2079 kvm->arch.crypto.aes_kw = 1; in kvm_s390_crypto_init()
2080 kvm->arch.crypto.dea_kw = 1; in kvm_s390_crypto_init()
2081 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_crypto_init()
2082 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_crypto_init()
2083 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_crypto_init()
2084 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_crypto_init()
2089 if (kvm->arch.use_esca) in sca_dispose()
2090 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block)); in sca_dispose()
2092 free_page((unsigned long)(kvm->arch.sca)); in sca_dispose()
2093 kvm->arch.sca = NULL; in sca_dispose()
2103 rc = -EINVAL; in kvm_arch_init_vm()
2118 rc = -ENOMEM; in kvm_arch_init_vm()
2122 rwlock_init(&kvm->arch.sca_lock); in kvm_arch_init_vm()
2124 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags); in kvm_arch_init_vm()
2125 if (!kvm->arch.sca) in kvm_arch_init_vm()
2131 kvm->arch.sca = (struct bsca_block *) in kvm_arch_init_vm()
2132 ((char *) kvm->arch.sca + sca_offset); in kvm_arch_init_vm()
2135 sprintf(debug_name, "kvm-%u", current->pid); in kvm_arch_init_vm()
2137 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long)); in kvm_arch_init_vm()
2138 if (!kvm->arch.dbf) in kvm_arch_init_vm()
2142 kvm->arch.sie_page2 = in kvm_arch_init_vm()
2144 if (!kvm->arch.sie_page2) in kvm_arch_init_vm()
2147 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list; in kvm_arch_init_vm()
2150 kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] & in kvm_arch_init_vm()
2153 kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] & in kvm_arch_init_vm()
2157 /* we are always in czam mode - even on pre z14 machines */ in kvm_arch_init_vm()
2158 set_kvm_facility(kvm->arch.model.fac_mask, 138); in kvm_arch_init_vm()
2159 set_kvm_facility(kvm->arch.model.fac_list, 138); in kvm_arch_init_vm()
2161 set_kvm_facility(kvm->arch.model.fac_mask, 74); in kvm_arch_init_vm()
2162 set_kvm_facility(kvm->arch.model.fac_list, 74); in kvm_arch_init_vm()
2164 set_kvm_facility(kvm->arch.model.fac_mask, 147); in kvm_arch_init_vm()
2165 set_kvm_facility(kvm->arch.model.fac_list, 147); in kvm_arch_init_vm()
2168 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid(); in kvm_arch_init_vm()
2169 kvm->arch.model.ibc = sclp.ibc & 0x0fff; in kvm_arch_init_vm()
2173 mutex_init(&kvm->arch.float_int.ais_lock); in kvm_arch_init_vm()
2174 spin_lock_init(&kvm->arch.float_int.lock); in kvm_arch_init_vm()
2176 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); in kvm_arch_init_vm()
2177 init_waitqueue_head(&kvm->arch.ipte_wq); in kvm_arch_init_vm()
2178 mutex_init(&kvm->arch.ipte_mutex); in kvm_arch_init_vm()
2180 debug_register_view(kvm->arch.dbf, &debug_sprintf_view); in kvm_arch_init_vm()
2184 kvm->arch.gmap = NULL; in kvm_arch_init_vm()
2185 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT; in kvm_arch_init_vm()
2188 kvm->arch.mem_limit = TASK_SIZE_MAX; in kvm_arch_init_vm()
2190 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX, in kvm_arch_init_vm()
2192 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1); in kvm_arch_init_vm()
2193 if (!kvm->arch.gmap) in kvm_arch_init_vm()
2195 kvm->arch.gmap->private = kvm; in kvm_arch_init_vm()
2196 kvm->arch.gmap->pfault_enabled = 0; in kvm_arch_init_vm()
2199 kvm->arch.use_pfmfi = sclp.has_pfmfi; in kvm_arch_init_vm()
2200 kvm->arch.use_skf = sclp.has_skey; in kvm_arch_init_vm()
2201 spin_lock_init(&kvm->arch.start_stop_lock); in kvm_arch_init_vm()
2204 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid); in kvm_arch_init_vm()
2208 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_init_vm()
2209 debug_unregister(kvm->arch.dbf); in kvm_arch_init_vm()
2228 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id); in kvm_arch_vcpu_destroy()
2231 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
2234 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
2235 gmap_remove(vcpu->arch.gmap); in kvm_arch_vcpu_destroy()
2237 if (vcpu->kvm->arch.use_cmma) in kvm_arch_vcpu_destroy()
2239 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_destroy()
2253 mutex_lock(&kvm->lock); in kvm_free_vcpus()
2254 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) in kvm_free_vcpus()
2255 kvm->vcpus[i] = NULL; in kvm_free_vcpus()
2257 atomic_set(&kvm->online_vcpus, 0); in kvm_free_vcpus()
2258 mutex_unlock(&kvm->lock); in kvm_free_vcpus()
2265 debug_unregister(kvm->arch.dbf); in kvm_arch_destroy_vm()
2267 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_destroy_vm()
2269 gmap_remove(kvm->arch.gmap); in kvm_arch_destroy_vm()
2279 vcpu->arch.gmap = gmap_create(current->mm, -1UL); in __kvm_ucontrol_vcpu_init()
2280 if (!vcpu->arch.gmap) in __kvm_ucontrol_vcpu_init()
2281 return -ENOMEM; in __kvm_ucontrol_vcpu_init()
2282 vcpu->arch.gmap->private = vcpu->kvm; in __kvm_ucontrol_vcpu_init()
2291 read_lock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
2292 if (vcpu->kvm->arch.use_esca) { in sca_del_vcpu()
2293 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
2295 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn); in sca_del_vcpu()
2296 sca->cpu[vcpu->vcpu_id].sda = 0; in sca_del_vcpu()
2298 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
2300 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn); in sca_del_vcpu()
2301 sca->cpu[vcpu->vcpu_id].sda = 0; in sca_del_vcpu()
2303 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
2309 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
2312 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); in sca_add_vcpu()
2313 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca; in sca_add_vcpu()
2316 read_lock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
2317 if (vcpu->kvm->arch.use_esca) { in sca_add_vcpu()
2318 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
2320 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block; in sca_add_vcpu()
2321 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); in sca_add_vcpu()
2322 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU; in sca_add_vcpu()
2323 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; in sca_add_vcpu()
2324 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn); in sca_add_vcpu()
2326 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
2328 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block; in sca_add_vcpu()
2329 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); in sca_add_vcpu()
2330 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca; in sca_add_vcpu()
2331 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn); in sca_add_vcpu()
2333 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
2339 d->sda = s->sda; in sca_copy_entry()
2340 d->sigp_ctrl.c = s->sigp_ctrl.c; in sca_copy_entry()
2341 d->sigp_ctrl.scn = s->sigp_ctrl.scn; in sca_copy_entry()
2348 d->ipte_control = s->ipte_control; in sca_copy_b_to_e()
2349 d->mcn[0] = s->mcn; in sca_copy_b_to_e()
2351 sca_copy_entry(&d->cpu[i], &s->cpu[i]); in sca_copy_b_to_e()
2356 struct bsca_block *old_sca = kvm->arch.sca; in sca_switch_to_extended()
2364 return -ENOMEM; in sca_switch_to_extended()
2370 write_lock(&kvm->arch.sca_lock); in sca_switch_to_extended()
2375 vcpu->arch.sie_block->scaoh = scaoh; in sca_switch_to_extended()
2376 vcpu->arch.sie_block->scaol = scaol; in sca_switch_to_extended()
2377 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; in sca_switch_to_extended()
2379 kvm->arch.sca = new_sca; in sca_switch_to_extended()
2380 kvm->arch.use_esca = 1; in sca_switch_to_extended()
2382 write_unlock(&kvm->arch.sca_lock); in sca_switch_to_extended()
2387 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)", in sca_switch_to_extended()
2388 old_sca, kvm->arch.sca); in sca_switch_to_extended()
2406 mutex_lock(&kvm->lock); in sca_can_add_vcpu()
2407 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm); in sca_can_add_vcpu()
2408 mutex_unlock(&kvm->lock); in sca_can_add_vcpu()
2415 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_arch_vcpu_init()
2417 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX | in kvm_arch_vcpu_init()
2424 if (test_kvm_facility(vcpu->kvm, 64)) in kvm_arch_vcpu_init()
2425 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB; in kvm_arch_vcpu_init()
2426 if (test_kvm_facility(vcpu->kvm, 82)) in kvm_arch_vcpu_init()
2427 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC; in kvm_arch_vcpu_init()
2428 if (test_kvm_facility(vcpu->kvm, 133)) in kvm_arch_vcpu_init()
2429 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB; in kvm_arch_vcpu_init()
2430 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_arch_vcpu_init()
2431 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN; in kvm_arch_vcpu_init()
2436 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS; in kvm_arch_vcpu_init()
2438 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS; in kvm_arch_vcpu_init()
2440 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_init()
2449 WARN_ON_ONCE(vcpu->arch.cputm_start != 0); in __start_cpu_timer_accounting()
2450 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in __start_cpu_timer_accounting()
2451 vcpu->arch.cputm_start = get_tod_clock_fast(); in __start_cpu_timer_accounting()
2452 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in __start_cpu_timer_accounting()
2458 WARN_ON_ONCE(vcpu->arch.cputm_start == 0); in __stop_cpu_timer_accounting()
2459 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in __stop_cpu_timer_accounting()
2460 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start; in __stop_cpu_timer_accounting()
2461 vcpu->arch.cputm_start = 0; in __stop_cpu_timer_accounting()
2462 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in __stop_cpu_timer_accounting()
2468 WARN_ON_ONCE(vcpu->arch.cputm_enabled); in __enable_cpu_timer_accounting()
2469 vcpu->arch.cputm_enabled = true; in __enable_cpu_timer_accounting()
2476 WARN_ON_ONCE(!vcpu->arch.cputm_enabled); in __disable_cpu_timer_accounting()
2478 vcpu->arch.cputm_enabled = false; in __disable_cpu_timer_accounting()
2495 /* set the cpu timer - may only be called from the VCPU thread itself */
2499 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in kvm_s390_set_cpu_timer()
2500 if (vcpu->arch.cputm_enabled) in kvm_s390_set_cpu_timer()
2501 vcpu->arch.cputm_start = get_tod_clock_fast(); in kvm_s390_set_cpu_timer()
2502 vcpu->arch.sie_block->cputm = cputm; in kvm_s390_set_cpu_timer()
2503 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in kvm_s390_set_cpu_timer()
2507 /* update and get the cpu timer - can also be called from other VCPU threads */
2513 if (unlikely(!vcpu->arch.cputm_enabled)) in kvm_s390_get_cpu_timer()
2514 return vcpu->arch.sie_block->cputm; in kvm_s390_get_cpu_timer()
2518 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount); in kvm_s390_get_cpu_timer()
2523 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu); in kvm_s390_get_cpu_timer()
2524 value = vcpu->arch.sie_block->cputm; in kvm_s390_get_cpu_timer()
2526 if (likely(vcpu->arch.cputm_start)) in kvm_s390_get_cpu_timer()
2527 value -= get_tod_clock_fast() - vcpu->arch.cputm_start; in kvm_s390_get_cpu_timer()
2528 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1)); in kvm_s390_get_cpu_timer()
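The CPU timer hits above (lines 2495-2528) rely on a seqcount: only the VCPU thread writes the timer, bracketed by raw_write_seqcount_begin/end(), while other threads read it locklessly and retry if a write was in flight; the "seq & ~1" in the retry lets a reader that started mid-update still converge. A condensed sketch of that pattern in kernel style, with simplified, hypothetical fields instead of vcpu->arch:

	#include <linux/seqlock.h>
	#include <linux/types.h>

	struct cputm {
		seqcount_t seqcount;
		u64 start;	/* TOD when accounting started, 0 while stopped */
		u64 value;	/* remaining CPU timer */
	};

	/* writer - may only be called from the owning VCPU thread */
	static void cputm_set(struct cputm *t, u64 value, u64 now)
	{
		preempt_disable();
		raw_write_seqcount_begin(&t->seqcount);
		if (t->start)
			t->start = now;		/* restart accounting from "now" */
		t->value = value;
		raw_write_seqcount_end(&t->seqcount);
		preempt_enable();
	}

	/* reader - can also be called from other threads */
	static u64 cputm_get(struct cputm *t, u64 now)
	{
		unsigned int seq;
		u64 value;

		do {
			seq = raw_read_seqcount(&t->seqcount);
			value = t->value;
			if (t->start)		/* accounting running: subtract elapsed time */
				value -= now - t->start;
		} while (read_seqcount_retry(&t->seqcount, seq & ~1));
		return value;
	}
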
2536 gmap_enable(vcpu->arch.enabled_gmap); in kvm_arch_vcpu_load()
2538 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) in kvm_arch_vcpu_load()
2540 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
2545 vcpu->cpu = -1; in kvm_arch_vcpu_put()
2546 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) in kvm_arch_vcpu_put()
2549 vcpu->arch.enabled_gmap = gmap_get_enabled(); in kvm_arch_vcpu_put()
2550 gmap_disable(vcpu->arch.enabled_gmap); in kvm_arch_vcpu_put()
2557 vcpu->arch.sie_block->gpsw.mask = 0UL; in kvm_s390_vcpu_initial_reset()
2558 vcpu->arch.sie_block->gpsw.addr = 0UL; in kvm_s390_vcpu_initial_reset()
2561 vcpu->arch.sie_block->ckc = 0UL; in kvm_s390_vcpu_initial_reset()
2562 vcpu->arch.sie_block->todpr = 0; in kvm_s390_vcpu_initial_reset()
2563 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64)); in kvm_s390_vcpu_initial_reset()
2564 vcpu->arch.sie_block->gcr[0] = CR0_UNUSED_56 | in kvm_s390_vcpu_initial_reset()
2567 vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 | in kvm_s390_vcpu_initial_reset()
2570 vcpu->run->s.regs.fpc = 0; in kvm_s390_vcpu_initial_reset()
2571 vcpu->arch.sie_block->gbea = 1; in kvm_s390_vcpu_initial_reset()
2572 vcpu->arch.sie_block->pp = 0; in kvm_s390_vcpu_initial_reset()
2573 vcpu->arch.sie_block->fpf &= ~FPF_BPBC; in kvm_s390_vcpu_initial_reset()
2574 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_s390_vcpu_initial_reset()
2576 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) in kvm_s390_vcpu_initial_reset()
2583 mutex_lock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
2585 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; in kvm_arch_vcpu_postcreate()
2586 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx; in kvm_arch_vcpu_postcreate()
2588 mutex_unlock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
2589 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_postcreate()
2590 vcpu->arch.gmap = vcpu->kvm->arch.gmap; in kvm_arch_vcpu_postcreate()
2593 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0) in kvm_arch_vcpu_postcreate()
2594 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC; in kvm_arch_vcpu_postcreate()
2596 vcpu->arch.enabled_gmap = vcpu->arch.gmap; in kvm_arch_vcpu_postcreate()
2601 if (!test_kvm_facility(vcpu->kvm, 76)) in kvm_s390_vcpu_crypto_setup()
2604 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA); in kvm_s390_vcpu_crypto_setup()
2606 if (vcpu->kvm->arch.crypto.aes_kw) in kvm_s390_vcpu_crypto_setup()
2607 vcpu->arch.sie_block->ecb3 |= ECB3_AES; in kvm_s390_vcpu_crypto_setup()
2608 if (vcpu->kvm->arch.crypto.dea_kw) in kvm_s390_vcpu_crypto_setup()
2609 vcpu->arch.sie_block->ecb3 |= ECB3_DEA; in kvm_s390_vcpu_crypto_setup()
2611 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; in kvm_s390_vcpu_crypto_setup()
2616 free_page(vcpu->arch.sie_block->cbrlo); in kvm_s390_vcpu_unsetup_cmma()
2617 vcpu->arch.sie_block->cbrlo = 0; in kvm_s390_vcpu_unsetup_cmma()
2622 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL); in kvm_s390_vcpu_setup_cmma()
2623 if (!vcpu->arch.sie_block->cbrlo) in kvm_s390_vcpu_setup_cmma()
2624 return -ENOMEM; in kvm_s390_vcpu_setup_cmma()
2630 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; in kvm_s390_vcpu_setup_model()
2632 vcpu->arch.sie_block->ibc = model->ibc; in kvm_s390_vcpu_setup_model()
2633 if (test_kvm_facility(vcpu->kvm, 7)) in kvm_s390_vcpu_setup_model()
2634 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list; in kvm_s390_vcpu_setup_model()
2641 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | in kvm_arch_vcpu_setup()
2645 if (test_kvm_facility(vcpu->kvm, 78)) in kvm_arch_vcpu_setup()
2647 else if (test_kvm_facility(vcpu->kvm, 8)) in kvm_arch_vcpu_setup()
2654 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT; in kvm_arch_vcpu_setup()
2655 if (test_kvm_facility(vcpu->kvm, 9)) in kvm_arch_vcpu_setup()
2656 vcpu->arch.sie_block->ecb |= ECB_SRSI; in kvm_arch_vcpu_setup()
2657 if (test_kvm_facility(vcpu->kvm, 73)) in kvm_arch_vcpu_setup()
2658 vcpu->arch.sie_block->ecb |= ECB_TE; in kvm_arch_vcpu_setup()
2660 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi) in kvm_arch_vcpu_setup()
2661 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI; in kvm_arch_vcpu_setup()
2662 if (test_kvm_facility(vcpu->kvm, 130)) in kvm_arch_vcpu_setup()
2663 vcpu->arch.sie_block->ecb2 |= ECB2_IEP; in kvm_arch_vcpu_setup()
2664 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI; in kvm_arch_vcpu_setup()
2666 vcpu->arch.sie_block->eca |= ECA_CEI; in kvm_arch_vcpu_setup()
2668 vcpu->arch.sie_block->eca |= ECA_IB; in kvm_arch_vcpu_setup()
2670 vcpu->arch.sie_block->eca |= ECA_SII; in kvm_arch_vcpu_setup()
2672 vcpu->arch.sie_block->eca |= ECA_SIGPI; in kvm_arch_vcpu_setup()
2673 if (test_kvm_facility(vcpu->kvm, 129)) { in kvm_arch_vcpu_setup()
2674 vcpu->arch.sie_block->eca |= ECA_VX; in kvm_arch_vcpu_setup()
2675 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; in kvm_arch_vcpu_setup()
2677 if (test_kvm_facility(vcpu->kvm, 139)) in kvm_arch_vcpu_setup()
2678 vcpu->arch.sie_block->ecd |= ECD_MEF; in kvm_arch_vcpu_setup()
2679 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_arch_vcpu_setup()
2680 vcpu->arch.sie_block->ecd |= ECD_ETOKENF; in kvm_arch_vcpu_setup()
2681 if (vcpu->arch.sie_block->gd) { in kvm_arch_vcpu_setup()
2682 vcpu->arch.sie_block->eca |= ECA_AIV; in kvm_arch_vcpu_setup()
2683 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u", in kvm_arch_vcpu_setup()
2684 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id); in kvm_arch_vcpu_setup()
2686 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx) in kvm_arch_vcpu_setup()
2688 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb; in kvm_arch_vcpu_setup()
2693 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; in kvm_arch_vcpu_setup()
2695 if (vcpu->kvm->arch.use_cmma) { in kvm_arch_vcpu_setup()
2700 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in kvm_arch_vcpu_setup()
2701 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; in kvm_arch_vcpu_setup()
2713 int rc = -EINVAL; in kvm_arch_vcpu_create()
2718 rc = -ENOMEM; in kvm_arch_vcpu_create()
2729 vcpu->arch.sie_block = &sie_page->sie_block; in kvm_arch_vcpu_create()
2730 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; in kvm_arch_vcpu_create()
2733 vcpu->arch.sie_block->mso = 0; in kvm_arch_vcpu_create()
2734 vcpu->arch.sie_block->msl = sclp.hamax; in kvm_arch_vcpu_create()
2736 vcpu->arch.sie_block->icpua = id; in kvm_arch_vcpu_create()
2737 spin_lock_init(&vcpu->arch.local_int.lock); in kvm_arch_vcpu_create()
2738 vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa; in kvm_arch_vcpu_create()
2739 if (vcpu->arch.sie_block->gd && sclp.has_gisaf) in kvm_arch_vcpu_create()
2740 vcpu->arch.sie_block->gd |= GISA_FORMAT1; in kvm_arch_vcpu_create()
2741 seqcount_init(&vcpu->arch.cputm_seqcount); in kvm_arch_vcpu_create()
2747 vcpu->arch.sie_block); in kvm_arch_vcpu_create()
2748 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block); in kvm_arch_vcpu_create()
2752 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_create()
2766 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE); in kvm_arch_vcpu_in_kernel()
2771 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_block()
2777 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_unblock()
2782 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_request()
2788 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_request_handled()
2798 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) in exit_sie()
2812 struct kvm *kvm = gmap->private; in kvm_gmap_notifier()
2825 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) { in kvm_gmap_notifier()
2826 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx", in kvm_gmap_notifier()
2843 int r = -EINVAL; in kvm_arch_vcpu_ioctl_get_one_reg()
2845 switch (reg->id) { in kvm_arch_vcpu_ioctl_get_one_reg()
2847 r = put_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_get_one_reg()
2848 (u32 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
2851 r = put_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_get_one_reg()
2852 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
2856 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
2859 r = put_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_get_one_reg()
2860 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
2863 r = put_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_get_one_reg()
2864 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
2867 r = put_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_get_one_reg()
2868 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
2871 r = put_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_get_one_reg()
2872 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
2875 r = put_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_get_one_reg()
2876 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
2879 r = put_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_get_one_reg()
2880 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
2892 int r = -EINVAL; in kvm_arch_vcpu_ioctl_set_one_reg()
2895 switch (reg->id) { in kvm_arch_vcpu_ioctl_set_one_reg()
2897 r = get_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_set_one_reg()
2898 (u32 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
2901 r = get_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_set_one_reg()
2902 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
2905 r = get_user(val, (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
2910 r = get_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_set_one_reg()
2911 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
2914 r = get_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_set_one_reg()
2915 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
2916 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_vcpu_ioctl_set_one_reg()
2920 r = get_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_set_one_reg()
2921 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
2924 r = get_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_set_one_reg()
2925 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
2928 r = get_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_set_one_reg()
2929 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
2932 r = get_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_set_one_reg()
2933 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
2951 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_set_regs()
2959 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_get_regs()
2969 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs)); in kvm_arch_vcpu_ioctl_set_sregs()
2970 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_set_sregs()
2981 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs)); in kvm_arch_vcpu_ioctl_get_sregs()
2982 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_get_sregs()
2994 if (test_fp_ctl(fpu->fpc)) { in kvm_arch_vcpu_ioctl_set_fpu()
2995 ret = -EINVAL; in kvm_arch_vcpu_ioctl_set_fpu()
2998 vcpu->run->s.regs.fpc = fpu->fpc; in kvm_arch_vcpu_ioctl_set_fpu()
3000 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs, in kvm_arch_vcpu_ioctl_set_fpu()
3001 (freg_t *) fpu->fprs); in kvm_arch_vcpu_ioctl_set_fpu()
3003 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs)); in kvm_arch_vcpu_ioctl_set_fpu()
3017 convert_vx_to_fp((freg_t *) fpu->fprs, in kvm_arch_vcpu_ioctl_get_fpu()
3018 (__vector128 *) vcpu->run->s.regs.vrs); in kvm_arch_vcpu_ioctl_get_fpu()
3020 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs)); in kvm_arch_vcpu_ioctl_get_fpu()
3021 fpu->fpc = vcpu->run->s.regs.fpc; in kvm_arch_vcpu_ioctl_get_fpu()
3032 rc = -EBUSY; in kvm_arch_vcpu_ioctl_set_initial_psw()
3034 vcpu->run->psw_mask = psw.mask; in kvm_arch_vcpu_ioctl_set_initial_psw()
3035 vcpu->run->psw_addr = psw.addr; in kvm_arch_vcpu_ioctl_set_initial_psw()
3043 return -EINVAL; /* not implemented yet */ in kvm_arch_vcpu_ioctl_translate()
3057 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
3060 if (dbg->control & ~VALID_GUESTDBG_FLAGS) { in kvm_arch_vcpu_ioctl_set_guest_debug()
3061 rc = -EINVAL; in kvm_arch_vcpu_ioctl_set_guest_debug()
3065 rc = -EINVAL; in kvm_arch_vcpu_ioctl_set_guest_debug()
3069 if (dbg->control & KVM_GUESTDBG_ENABLE) { in kvm_arch_vcpu_ioctl_set_guest_debug()
3070 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
3074 if (dbg->control & KVM_GUESTDBG_USE_HW_BP) in kvm_arch_vcpu_ioctl_set_guest_debug()
3078 vcpu->arch.guestdbg.last_bp = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
3082 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
3114 /* user space knows about this interface - let it control the state */ in kvm_arch_vcpu_ioctl_set_mpstate()
3115 vcpu->kvm->arch.user_cpu_state_ctrl = 1; in kvm_arch_vcpu_ioctl_set_mpstate()
3117 switch (mp_state->mp_state) { in kvm_arch_vcpu_ioctl_set_mpstate()
3126 /* fall through - CHECK_STOP and LOAD are not supported yet */ in kvm_arch_vcpu_ioctl_set_mpstate()
3128 rc = -ENXIO; in kvm_arch_vcpu_ioctl_set_mpstate()
3147 * We use MMU_RELOAD just to re-arm the ipte notifier for the in kvm_s390_handle_requests()
3155 rc = gmap_mprotect_notify(vcpu->arch.gmap, in kvm_s390_handle_requests()
3166 vcpu->arch.sie_block->ihcpu = 0xffff; in kvm_s390_handle_requests()
3172 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); in kvm_s390_handle_requests()
3180 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0); in kvm_s390_handle_requests()
3187 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC; in kvm_s390_handle_requests()
3197 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA; in kvm_s390_handle_requests()
3203 * Re-enable CMM virtualization if CMMA is available and in kvm_s390_handle_requests()
3206 if ((vcpu->kvm->arch.use_cmma) && in kvm_s390_handle_requests()
3207 (vcpu->kvm->mm->context.uses_cmm)) in kvm_s390_handle_requests()
3208 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA; in kvm_s390_handle_requests()
3225 mutex_lock(&kvm->lock); in kvm_s390_set_tod_clock()
3230 kvm->arch.epoch = gtod->tod - htod.tod; in kvm_s390_set_tod_clock()
3231 kvm->arch.epdx = 0; in kvm_s390_set_tod_clock()
3233 kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx; in kvm_s390_set_tod_clock()
3234 if (kvm->arch.epoch > gtod->tod) in kvm_s390_set_tod_clock()
3235 kvm->arch.epdx -= 1; in kvm_s390_set_tod_clock()
3240 vcpu->arch.sie_block->epoch = kvm->arch.epoch; in kvm_s390_set_tod_clock()
3241 vcpu->arch.sie_block->epdx = kvm->arch.epdx; in kvm_s390_set_tod_clock()
3246 mutex_unlock(&kvm->lock); in kvm_s390_set_tod_clock()
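The kvm_s390_set_tod_clock() hits above (lines 3225-3246) are the inverse of the earlier carry case: the VM epoch is the guest TOD minus the host TOD, and when that 64-bit subtraction underflows a borrow is taken from the epoch index before the result is pushed into every VCPU's SIE block. A small sketch of just the arithmetic, again with made-up types rather than the kernel's:

	#include <stdint.h>

	struct tod128 { uint8_t idx; uint64_t tod; };

	/* epoch = guest - host over the (index, tod) pair, with borrow */
	struct tod128 epoch_from_guest_tod(struct tod128 guest, struct tod128 host)
	{
		struct tod128 epoch;

		epoch.tod = guest.tod - host.tod;
		epoch.idx = guest.idx - host.idx;
		if (epoch.tod > guest.tod)	/* the 64-bit subtraction wrapped */
			epoch.idx -= 1;
		return epoch;
	}
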
3250 * kvm_arch_fault_in_page - fault-in guest page if necessary
3255 * Make sure that a guest page has been faulted-in on the host.
3261 return gmap_fault(vcpu->arch.gmap, gpa, in kvm_arch_fault_in_page()
3278 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); in __kvm_inject_pfault_token()
3285 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token); in kvm_arch_async_page_not_present()
3286 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token); in kvm_arch_async_page_not_present()
3292 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token); in kvm_arch_async_page_present()
3293 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token); in kvm_arch_async_page_present()
3317 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_setup_async_pf()
3319 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) != in kvm_arch_setup_async_pf()
3320 vcpu->arch.pfault_compare) in kvm_arch_setup_async_pf()
3326 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) in kvm_arch_setup_async_pf()
3328 if (!vcpu->arch.gmap->pfault_enabled) in kvm_arch_setup_async_pf()
3331 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr)); in kvm_arch_setup_async_pf()
3332 hva += current->thread.gmap_addr & ~PAGE_MASK; in kvm_arch_setup_async_pf()
3333 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) in kvm_arch_setup_async_pf()
3336 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); in kvm_arch_setup_async_pf()
3351 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14]; in vcpu_pre_run()
3352 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15]; in vcpu_pre_run()
3360 if (!kvm_is_ucontrol(vcpu->kvm)) { in vcpu_pre_run()
3375 vcpu->arch.sie_block->icptcode = 0; in vcpu_pre_run()
3376 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); in vcpu_pre_run()
3402 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1); in vcpu_post_run_fault_in_sie()
3407 /* Instruction-Fetching Exceptions - we can't detect the ilen. in vcpu_post_run_fault_in_sie()
3411 pgm_info = vcpu->arch.pgm; in vcpu_post_run_fault_in_sie()
3425 vcpu->arch.sie_block->icptcode); in vcpu_post_run()
3426 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); in vcpu_post_run()
3431 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14; in vcpu_post_run()
3432 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15; in vcpu_post_run()
3434 if (exit_reason == -EINTR) { in vcpu_post_run()
3436 sie_page = container_of(vcpu->arch.sie_block, in vcpu_post_run()
3438 mcck_info = &sie_page->mcck_info; in vcpu_post_run()
3443 if (vcpu->arch.sie_block->icptcode > 0) { in vcpu_post_run()
3446 if (rc != -EOPNOTSUPP) in vcpu_post_run()
3448 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC; in vcpu_post_run()
3449 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; in vcpu_post_run()
3450 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; in vcpu_post_run()
3451 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; in vcpu_post_run()
3452 return -EREMOTE; in vcpu_post_run()
3453 } else if (exit_reason != -EFAULT) { in vcpu_post_run()
3454 vcpu->stat.exit_null++; in vcpu_post_run()
3456 } else if (kvm_is_ucontrol(vcpu->kvm)) { in vcpu_post_run()
3457 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL; in vcpu_post_run()
3458 vcpu->run->s390_ucontrol.trans_exc_code = in vcpu_post_run()
3459 current->thread.gmap_addr; in vcpu_post_run()
3460 vcpu->run->s390_ucontrol.pgm_code = 0x10; in vcpu_post_run()
3461 return -EREMOTE; in vcpu_post_run()
3462 } else if (current->thread.gmap_pfault) { in vcpu_post_run()
3464 current->thread.gmap_pfault = 0; in vcpu_post_run()
3467 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1); in vcpu_post_run()
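When an intercept cannot be handled in the kernel, the code above copies the intercept code and the instruction parcel (ipa/ipb) into the run structure and returns -EREMOTE, so KVM_RUN comes back to userspace with KVM_EXIT_S390_SIEIC. A minimal userspace sketch for inspecting that exit, assuming run is the vcpu's mmap'ed struct kvm_run:

#include <stdio.h>
#include <linux/kvm.h>

static void dump_sie_intercept(const struct kvm_run *run)
{
	if (run->exit_reason != KVM_EXIT_S390_SIEIC)
		return;
	/* Fields filled by vcpu_post_run() before exiting to userspace. */
	fprintf(stderr, "SIE intercept: icptcode=0x%x ipa=0x%x ipb=0x%x\n",
		run->s390_sieic.icptcode,
		run->s390_sieic.ipa,
		run->s390_sieic.ipb);
}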
3477 * We try to hold kvm->srcu during most of vcpu_run (except when run- in __vcpu_run()
3480 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in __vcpu_run()
3487 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in __vcpu_run()
3496 exit_reason = sie64a(vcpu->arch.sie_block, in __vcpu_run()
3497 vcpu->run->s.regs.gprs); in __vcpu_run()
3502 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in __vcpu_run()
3507 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in __vcpu_run()
3516 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb; in sync_regs()
3517 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb; in sync_regs()
3518 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; in sync_regs()
3519 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; in sync_regs()
3520 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) in sync_regs()
3521 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); in sync_regs()
3522 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) { in sync_regs()
3523 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); in sync_regs()
3527 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { in sync_regs()
3528 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm); in sync_regs()
3529 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc; in sync_regs()
3530 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr; in sync_regs()
3531 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp; in sync_regs()
3532 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea; in sync_regs()
3534 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) { in sync_regs()
3535 vcpu->arch.pfault_token = kvm_run->s.regs.pft; in sync_regs()
3536 vcpu->arch.pfault_select = kvm_run->s.regs.pfs; in sync_regs()
3537 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc; in sync_regs()
3538 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in sync_regs()
3545 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) && in sync_regs()
3546 test_kvm_facility(vcpu->kvm, 64) && in sync_regs()
3547 riccb->v && in sync_regs()
3548 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) { in sync_regs()
3550 vcpu->arch.sie_block->ecb3 |= ECB3_RI; in sync_regs()
3553 * If userspace sets the gscb (e.g. after migration) to non-zero, in sync_regs()
3556 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) && in sync_regs()
3557 test_kvm_facility(vcpu->kvm, 133) && in sync_regs()
3558 gscb->gssm && in sync_regs()
3559 !vcpu->arch.gs_enabled) { in sync_regs()
3561 vcpu->arch.sie_block->ecb |= ECB_GS; in sync_regs()
3562 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; in sync_regs()
3563 vcpu->arch.gs_enabled = 1; in sync_regs()
3565 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) && in sync_regs()
3566 test_kvm_facility(vcpu->kvm, 82)) { in sync_regs()
3567 vcpu->arch.sie_block->fpf &= ~FPF_BPBC; in sync_regs()
3568 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0; in sync_regs()
3570 save_access_regs(vcpu->arch.host_acrs); in sync_regs()
3571 restore_access_regs(vcpu->run->s.regs.acrs); in sync_regs()
3574 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc; in sync_regs()
3575 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs; in sync_regs()
3577 current->thread.fpu.regs = vcpu->run->s.regs.vrs; in sync_regs()
3579 current->thread.fpu.regs = vcpu->run->s.regs.fprs; in sync_regs()
3580 current->thread.fpu.fpc = vcpu->run->s.regs.fpc; in sync_regs()
3581 if (test_fp_ctl(current->thread.fpu.fpc)) in sync_regs()
3583 current->thread.fpu.fpc = 0; in sync_regs()
3587 if (current->thread.gs_cb) { in sync_regs()
3588 vcpu->arch.host_gscb = current->thread.gs_cb; in sync_regs()
3589 save_gs_cb(vcpu->arch.host_gscb); in sync_regs()
3591 if (vcpu->arch.gs_enabled) { in sync_regs()
3592 current->thread.gs_cb = (struct gs_cb *) in sync_regs()
3593 &vcpu->run->s.regs.gscb; in sync_regs()
3594 restore_gs_cb(current->thread.gs_cb); in sync_regs()
3600 kvm_run->kvm_dirty_regs = 0; in sync_regs()
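sync_regs() only consumes the register blocks that userspace flagged in kvm_run->kvm_dirty_regs before KVM_RUN, and clears the dirty mask once they are loaded. A minimal userspace sketch of that contract, assuming run is the vcpu's mmap'ed struct kvm_run (the prefix and control-register fields shown are the ones the fragments above read):

#include <linux/kvm.h>

static void set_prefix_and_cr0(struct kvm_run *run, __u32 prefix, __u64 cr0)
{
	run->s.regs.prefix = prefix;	/* consumed via KVM_SYNC_PREFIX */
	run->s.regs.crs[0] = cr0;	/* consumed via KVM_SYNC_CRS    */
	run->kvm_dirty_regs |= KVM_SYNC_PREFIX | KVM_SYNC_CRS;
}

Only the blocks marked dirty are copied into the SIE control block on the next KVM_RUN; everything else keeps its current guest state.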
3605 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; in store_regs()
3606 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; in store_regs()
3607 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu); in store_regs()
3608 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); in store_regs()
3609 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu); in store_regs()
3610 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc; in store_regs()
3611 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr; in store_regs()
3612 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp; in store_regs()
3613 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; in store_regs()
3614 kvm_run->s.regs.pft = vcpu->arch.pfault_token; in store_regs()
3615 kvm_run->s.regs.pfs = vcpu->arch.pfault_select; in store_regs()
3616 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; in store_regs()
3617 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC; in store_regs()
3618 save_access_regs(vcpu->run->s.regs.acrs); in store_regs()
3619 restore_access_regs(vcpu->arch.host_acrs); in store_regs()
3622 vcpu->run->s.regs.fpc = current->thread.fpu.fpc; in store_regs()
3624 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc; in store_regs()
3625 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs; in store_regs()
3628 if (vcpu->arch.gs_enabled) in store_regs()
3629 save_gs_cb(current->thread.gs_cb); in store_regs()
3631 current->thread.gs_cb = vcpu->arch.host_gscb; in store_regs()
3632 restore_gs_cb(vcpu->arch.host_gscb); in store_regs()
3634 if (!vcpu->arch.host_gscb) in store_regs()
3636 vcpu->arch.host_gscb = NULL; in store_regs()
3645 if (kvm_run->immediate_exit) in kvm_arch_vcpu_ioctl_run()
3646 return -EINTR; in kvm_arch_vcpu_ioctl_run()
3658 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { in kvm_arch_vcpu_ioctl_run()
3662 vcpu->vcpu_id); in kvm_arch_vcpu_ioctl_run()
3663 rc = -EINVAL; in kvm_arch_vcpu_ioctl_run()
3674 kvm_run->exit_reason = KVM_EXIT_INTR; in kvm_arch_vcpu_ioctl_run()
3675 rc = -EINTR; in kvm_arch_vcpu_ioctl_run()
3683 if (rc == -EREMOTE) { in kvm_arch_vcpu_ioctl_run()
3693 vcpu->stat.exit_userspace++; in kvm_arch_vcpu_ioctl_run()
3702 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
3703 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
3716 return -EFAULT; in kvm_s390_store_status_unloaded()
3720 return -EFAULT; in kvm_s390_store_status_unloaded()
3723 gpa -= __LC_FPREGS_SAVE_AREA; in kvm_s390_store_status_unloaded()
3727 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs); in kvm_s390_store_status_unloaded()
3732 vcpu->run->s.regs.fprs, 128); in kvm_s390_store_status_unloaded()
3735 vcpu->run->s.regs.gprs, 128); in kvm_s390_store_status_unloaded()
3737 &vcpu->arch.sie_block->gpsw, 16); in kvm_s390_store_status_unloaded()
3741 &vcpu->run->s.regs.fpc, 4); in kvm_s390_store_status_unloaded()
3743 &vcpu->arch.sie_block->todpr, 4); in kvm_s390_store_status_unloaded()
3747 clkcomp = vcpu->arch.sie_block->ckc >> 8; in kvm_s390_store_status_unloaded()
3751 &vcpu->run->s.regs.acrs, 64); in kvm_s390_store_status_unloaded()
3753 &vcpu->arch.sie_block->gcr, 128); in kvm_s390_store_status_unloaded()
3754 return rc ? -EFAULT : 0; in kvm_s390_store_status_unloaded()
3765 vcpu->run->s.regs.fpc = current->thread.fpu.fpc; in kvm_s390_vcpu_store_status()
3766 save_access_regs(vcpu->run->s.regs.acrs); in kvm_s390_vcpu_store_status()
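kvm_s390_store_status_unloaded() writes the architected save area either to a caller-supplied absolute address or to the two special values described in the comment above (NOADDR and PREFIXED). From userspace this path is reached through the KVM_S390_STORE_STATUS vcpu ioctl; a minimal sketch, assuming vcpu_fd is an open vcpu file descriptor:

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int store_status_at(int vcpu_fd, unsigned long addr)
{
	/* addr may also be KVM_S390_STORE_STATUS_NOADDR or
	 * KVM_S390_STORE_STATUS_PREFIXED, as documented above. */
	return ioctl(vcpu_fd, KVM_S390_STORE_STATUS, addr);
}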
3802 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); in kvm_s390_vcpu_start()
3804 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
3805 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_start()
3808 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) in kvm_s390_vcpu_start()
3813 /* we're the only active VCPU -> speed it up */ in kvm_s390_vcpu_start()
3821 __disable_ibs_on_all_vcpus(vcpu->kvm); in kvm_s390_vcpu_start()
3830 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
3842 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); in kvm_s390_vcpu_stop()
3844 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
3845 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_stop()
3854 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) { in kvm_s390_vcpu_stop()
3856 started_vcpu = vcpu->kvm->vcpus[i]; in kvm_s390_vcpu_stop()
3868 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
3877 if (cap->flags) in kvm_vcpu_ioctl_enable_cap()
3878 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
3880 switch (cap->cap) { in kvm_vcpu_ioctl_enable_cap()
3882 if (!vcpu->kvm->arch.css_support) { in kvm_vcpu_ioctl_enable_cap()
3883 vcpu->kvm->arch.css_support = 1; in kvm_vcpu_ioctl_enable_cap()
3884 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support"); in kvm_vcpu_ioctl_enable_cap()
3885 trace_kvm_s390_enable_css(vcpu->kvm); in kvm_vcpu_ioctl_enable_cap()
3890 r = -EINVAL; in kvm_vcpu_ioctl_enable_cap()
3899 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_guest_mem_op()
3905 if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size) in kvm_s390_guest_mem_op()
3906 return -EINVAL; in kvm_s390_guest_mem_op()
3908 if (mop->size > MEM_OP_MAX_SIZE) in kvm_s390_guest_mem_op()
3909 return -E2BIG; in kvm_s390_guest_mem_op()
3911 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) { in kvm_s390_guest_mem_op()
3912 tmpbuf = vmalloc(mop->size); in kvm_s390_guest_mem_op()
3914 return -ENOMEM; in kvm_s390_guest_mem_op()
3917 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_s390_guest_mem_op()
3919 switch (mop->op) { in kvm_s390_guest_mem_op()
3921 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { in kvm_s390_guest_mem_op()
3922 r = check_gva_range(vcpu, mop->gaddr, mop->ar, in kvm_s390_guest_mem_op()
3923 mop->size, GACC_FETCH); in kvm_s390_guest_mem_op()
3926 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); in kvm_s390_guest_mem_op()
3928 if (copy_to_user(uaddr, tmpbuf, mop->size)) in kvm_s390_guest_mem_op()
3929 r = -EFAULT; in kvm_s390_guest_mem_op()
3933 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { in kvm_s390_guest_mem_op()
3934 r = check_gva_range(vcpu, mop->gaddr, mop->ar, in kvm_s390_guest_mem_op()
3935 mop->size, GACC_STORE); in kvm_s390_guest_mem_op()
3938 if (copy_from_user(tmpbuf, uaddr, mop->size)) { in kvm_s390_guest_mem_op()
3939 r = -EFAULT; in kvm_s390_guest_mem_op()
3942 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); in kvm_s390_guest_mem_op()
3945 r = -EINVAL; in kvm_s390_guest_mem_op()
3948 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvm_s390_guest_mem_op()
3950 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0) in kvm_s390_guest_mem_op()
3951 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); in kvm_s390_guest_mem_op()
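kvm_s390_guest_mem_op() above backs the KVM_S390_MEM_OP vcpu ioctl: userspace supplies a guest logical address, an access register number, a buffer, and the operation, and may request a check-only pass or exception injection via the flags. A minimal read sketch, assuming vcpu_fd is an open vcpu file descriptor:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_guest_logical(int vcpu_fd, __u64 gaddr, void *buf, __u32 len)
{
	struct kvm_s390_mem_op mop;

	memset(&mop, 0, sizeof(mop));
	mop.gaddr = gaddr;			/* guest logical address      */
	mop.size  = len;			/* bounded by MEM_OP_MAX_SIZE */
	mop.op    = KVM_S390_MEMOP_LOGICAL_READ;
	mop.buf   = (__u64)(unsigned long)buf;	/* userspace destination      */
	mop.ar    = 0;				/* access register number     */

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &mop);
}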
3960 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl()
3968 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
3976 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
3978 return -EINVAL; in kvm_arch_vcpu_async_ioctl()
3982 return -ENOIOCTLCMD; in kvm_arch_vcpu_async_ioctl()
3988 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
3997 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
3999 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
4004 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4016 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4030 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4034 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
4035 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4039 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, in kvm_arch_vcpu_ioctl()
4047 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4051 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
4052 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4056 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, in kvm_arch_vcpu_ioctl()
4062 r = gmap_fault(vcpu->arch.gmap, arg, 0); in kvm_arch_vcpu_ioctl()
4068 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4080 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4086 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4092 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4104 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4108 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4118 r = -ENOTTY; in kvm_arch_vcpu_ioctl()
4128 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET) in kvm_arch_vcpu_fault()
4129 && (kvm_is_ucontrol(vcpu->kvm))) { in kvm_arch_vcpu_fault()
4130 vmf->page = virt_to_page(vcpu->arch.sie_block); in kvm_arch_vcpu_fault()
4131 get_page(vmf->page); in kvm_arch_vcpu_fault()
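kvm_arch_vcpu_fault() only hands out the SIE control block page for user-controlled (ucontrol) VMs. A hedged sketch of how a VMM could map that page by mmap'ing the vcpu file descriptor at KVM_S390_SIE_PAGE_OFFSET (vcpu_fd is assumed to be an open vcpu fd; error handling is omitted):

#include <sys/mman.h>
#include <unistd.h>
#include <linux/kvm.h>

static void *map_sie_block(int vcpu_fd)
{
	long psize = sysconf(_SC_PAGESIZE);

	/* Faults in the page served by kvm_arch_vcpu_fault() above. */
	return mmap(NULL, psize, PROT_READ | PROT_WRITE, MAP_SHARED,
		    vcpu_fd, KVM_S390_SIE_PAGE_OFFSET * psize);
}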
4155 if (mem->userspace_addr & 0xffffful) in kvm_arch_prepare_memory_region()
4156 return -EINVAL; in kvm_arch_prepare_memory_region()
4158 if (mem->memory_size & 0xffffful) in kvm_arch_prepare_memory_region()
4159 return -EINVAL; in kvm_arch_prepare_memory_region()
4161 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit) in kvm_arch_prepare_memory_region()
4162 return -EINVAL; in kvm_arch_prepare_memory_region()
4177 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
4178 old->npages * PAGE_SIZE); in kvm_arch_commit_memory_region()
4181 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
4182 old->npages * PAGE_SIZE); in kvm_arch_commit_memory_region()
4187 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, in kvm_arch_commit_memory_region()
4188 mem->guest_phys_addr, mem->memory_size); in kvm_arch_commit_memory_region()
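kvm_arch_prepare_memory_region() above rejects memory slots whose userspace address or size has any of the low 20 bits set (i.e. not 1 MB aligned) or that exceed the VM's memory limit; the commit hook then maps the slot into the guest's gmap. A minimal userspace sketch of a conforming KVM_SET_USER_MEMORY_REGION call, where vm_fd and the 1 MB-aligned backing mapping are assumptions:

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int add_memslot(int vm_fd, __u32 slot, __u64 guest_phys,
		       void *backing, __u64 size)
{
	struct kvm_userspace_memory_region region = {
		.slot		 = slot,
		.flags		 = 0,
		.guest_phys_addr = guest_phys,
		.memory_size	 = size,			/* multiple of 1 MB */
		.userspace_addr	 = (__u64)(unsigned long)backing, /* 1 MB aligned */
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}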
4209 vcpu->valid_wakeup = false; in kvm_arch_vcpu_block_finish()
4218 return -ENODEV; in kvm_s390_init()
4223 return -EINVAL; in kvm_s390_init()