Lines matching +full:ipa +full:clock +full:query
1 // SPDX-License-Identifier: GPL-2.0
14 #define KMSG_COMPONENT "kvm-s390"
36 #include <asm/asm-offsets.h>
48 #include "kvm-s390.h"
53 #include "trace-s390.h"
201 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
222 /* available subfunctions indicated via query / "test bit" */
233 /* every s390 is virtualization enabled ;-) */ in kvm_arch_hardware_enable()
253 * -delta to the epoch. in kvm_clock_sync_scb()
255 delta = -delta; in kvm_clock_sync_scb()
257 /* sign-extension - we're adding to signed values below */ in kvm_clock_sync_scb()
259 delta_idx = -1; in kvm_clock_sync_scb()
261 scb->epoch += delta; in kvm_clock_sync_scb()
262 if (scb->ecd & ECD_MEF) { in kvm_clock_sync_scb()
263 scb->epdx += delta_idx; in kvm_clock_sync_scb()
264 if (scb->epoch < delta) in kvm_clock_sync_scb()
265 scb->epdx += 1; in kvm_clock_sync_scb()
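A minimal standalone model of the carry handling in these epoch lines: the 64-bit delta is sign-extended and added to the 128-bit (epdx:epoch) pair, with an unsigned compare detecting the carry out of the low word. Portable C sketch, not the kernel's code:

#include <stdint.h>

/* sketch: add a signed 64-bit delta to the 128-bit epoch pair (epdx:epoch) */
void epoch_add(uint64_t *epdx, uint64_t *epoch, int64_t delta)
{
	uint64_t d = (uint64_t)delta;
	uint64_t d_idx = delta < 0 ? ~0ULL : 0;	/* sign-extension into the high word */

	*epoch += d;
	*epdx += d_idx;
	if (*epoch < d)	/* unsigned overflow of the low word => carry */
		*epdx += 1;
}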
285 kvm_clock_sync_scb(vcpu->arch.sie_block, *delta); in kvm_clock_sync()
287 kvm->arch.epoch = vcpu->arch.sie_block->epoch; in kvm_clock_sync()
288 kvm->arch.epdx = vcpu->arch.sie_block->epdx; in kvm_clock_sync()
290 if (vcpu->arch.cputm_enabled) in kvm_clock_sync()
291 vcpu->arch.cputm_start += *delta; in kvm_clock_sync()
292 if (vcpu->arch.vsie_block) in kvm_clock_sync()
293 kvm_clock_sync_scb(vcpu->arch.vsie_block, in kvm_clock_sync()
345 static __always_inline void __insn32_query(unsigned int opcode, u8 *query) in __insn32_query() argument
349 " lgr 1,%[query]\n" in __insn32_query()
353 : [query] "d" ((unsigned long)query), [opc] "i" (opcode) in __insn32_query()
369 if (test_facility(28)) /* TOD-clock steering */ in kvm_s390_cpu_feat_init()
465 int rc = -ENOMEM; in kvm_arch_init()
467 kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long)); in kvm_arch_init()
469 return -ENOMEM; in kvm_arch_init()
471 kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long)); in kvm_arch_init()
512 return -EINVAL; in kvm_arch_dev_ioctl()
597 struct gmap *gmap = kvm->arch.gmap; in kvm_arch_sync_dirty_log()
601 cur_gfn = memslot->base_gfn; in kvm_arch_sync_dirty_log()
602 last_gfn = memslot->base_gfn + memslot->npages; in kvm_arch_sync_dirty_log()
637 return -EINVAL; in kvm_vm_ioctl_get_dirty_log()
639 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
641 r = -EINVAL; in kvm_vm_ioctl_get_dirty_log()
642 if (log->slot >= KVM_USER_MEM_SLOTS) in kvm_vm_ioctl_get_dirty_log()
652 memset(memslot->dirty_bitmap, 0, n); in kvm_vm_ioctl_get_dirty_log()
656 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
674 if (cap->flags) in kvm_vm_ioctl_enable_cap()
675 return -EINVAL; in kvm_vm_ioctl_enable_cap()
677 switch (cap->cap) { in kvm_vm_ioctl_enable_cap()
680 kvm->arch.use_irqchip = 1; in kvm_vm_ioctl_enable_cap()
685 kvm->arch.user_sigp = 1; in kvm_vm_ioctl_enable_cap()
689 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
690 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
691 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
693 set_kvm_facility(kvm->arch.model.fac_mask, 129); in kvm_vm_ioctl_enable_cap()
694 set_kvm_facility(kvm->arch.model.fac_list, 129); in kvm_vm_ioctl_enable_cap()
696 set_kvm_facility(kvm->arch.model.fac_mask, 134); in kvm_vm_ioctl_enable_cap()
697 set_kvm_facility(kvm->arch.model.fac_list, 134); in kvm_vm_ioctl_enable_cap()
700 set_kvm_facility(kvm->arch.model.fac_mask, 135); in kvm_vm_ioctl_enable_cap()
701 set_kvm_facility(kvm->arch.model.fac_list, 135); in kvm_vm_ioctl_enable_cap()
704 set_kvm_facility(kvm->arch.model.fac_mask, 148); in kvm_vm_ioctl_enable_cap()
705 set_kvm_facility(kvm->arch.model.fac_list, 148); in kvm_vm_ioctl_enable_cap()
708 set_kvm_facility(kvm->arch.model.fac_mask, 152); in kvm_vm_ioctl_enable_cap()
709 set_kvm_facility(kvm->arch.model.fac_list, 152); in kvm_vm_ioctl_enable_cap()
713 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
714 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
719 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
720 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
721 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
722 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
724 set_kvm_facility(kvm->arch.model.fac_mask, 64); in kvm_vm_ioctl_enable_cap()
725 set_kvm_facility(kvm->arch.model.fac_list, 64); in kvm_vm_ioctl_enable_cap()
728 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
733 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
734 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
735 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
737 set_kvm_facility(kvm->arch.model.fac_mask, 72); in kvm_vm_ioctl_enable_cap()
738 set_kvm_facility(kvm->arch.model.fac_list, 72); in kvm_vm_ioctl_enable_cap()
741 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
746 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
747 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
748 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
749 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
751 set_kvm_facility(kvm->arch.model.fac_mask, 133); in kvm_vm_ioctl_enable_cap()
752 set_kvm_facility(kvm->arch.model.fac_list, 133); in kvm_vm_ioctl_enable_cap()
755 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
760 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
761 if (kvm->created_vcpus) in kvm_vm_ioctl_enable_cap()
762 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
763 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_enable_cap()
764 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
767 mmap_write_lock(kvm->mm); in kvm_vm_ioctl_enable_cap()
768 kvm->mm->context.allow_gmap_hpage_1m = 1; in kvm_vm_ioctl_enable_cap()
769 mmap_write_unlock(kvm->mm); in kvm_vm_ioctl_enable_cap()
775 kvm->arch.use_skf = 0; in kvm_vm_ioctl_enable_cap()
776 kvm->arch.use_pfmfi = 0; in kvm_vm_ioctl_enable_cap()
778 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
784 kvm->arch.user_stsi = 1; in kvm_vm_ioctl_enable_cap()
789 kvm->arch.user_instr0 = 1; in kvm_vm_ioctl_enable_cap()
794 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
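For reference on the numbers passed to set_kvm_facility() above: STFLE facility lists are MSB-first bit arrays, so facility nr lives in byte nr / 8 at bit 7 - nr % 8. A sketch of the underlying bit set, modeled on __set_facility() from <asm/facility.h> (treat the details as an assumption):

/* sketch: set STFLE facility bit 'nr' (bit 0 = most significant bit of byte 0) */
void set_facility_bit(unsigned char *fac_list, unsigned int nr)
{
	fac_list[nr >> 3] |= 0x80U >> (nr & 7);
}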
804 switch (attr->attr) { in kvm_s390_get_mem_control()
807 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes", in kvm_s390_get_mem_control()
808 kvm->arch.mem_limit); in kvm_s390_get_mem_control()
809 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr)) in kvm_s390_get_mem_control()
810 ret = -EFAULT; in kvm_s390_get_mem_control()
813 ret = -ENXIO; in kvm_s390_get_mem_control()
823 switch (attr->attr) { in kvm_s390_set_mem_control()
825 ret = -ENXIO; in kvm_s390_set_mem_control()
830 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
831 if (kvm->created_vcpus) in kvm_s390_set_mem_control()
832 ret = -EBUSY; in kvm_s390_set_mem_control()
833 else if (kvm->mm->context.allow_gmap_hpage_1m) in kvm_s390_set_mem_control()
834 ret = -EINVAL; in kvm_s390_set_mem_control()
836 kvm->arch.use_cmma = 1; in kvm_s390_set_mem_control()
838 kvm->arch.use_pfmfi = 0; in kvm_s390_set_mem_control()
841 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
844 ret = -ENXIO; in kvm_s390_set_mem_control()
847 ret = -EINVAL; in kvm_s390_set_mem_control()
848 if (!kvm->arch.use_cmma) in kvm_s390_set_mem_control()
852 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
853 idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_mem_control()
854 s390_reset_cmma(kvm->arch.gmap->mm); in kvm_s390_set_mem_control()
855 srcu_read_unlock(&kvm->srcu, idx); in kvm_s390_set_mem_control()
856 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
863 return -EINVAL; in kvm_s390_set_mem_control()
865 if (get_user(new_limit, (u64 __user *)attr->addr)) in kvm_s390_set_mem_control()
866 return -EFAULT; in kvm_s390_set_mem_control()
868 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT && in kvm_s390_set_mem_control()
869 new_limit > kvm->arch.mem_limit) in kvm_s390_set_mem_control()
870 return -E2BIG; in kvm_s390_set_mem_control()
873 return -EINVAL; in kvm_s390_set_mem_control()
877 new_limit -= 1; in kvm_s390_set_mem_control()
879 ret = -EBUSY; in kvm_s390_set_mem_control()
880 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
881 if (!kvm->created_vcpus) { in kvm_s390_set_mem_control()
883 struct gmap *new = gmap_create(current->mm, new_limit); in kvm_s390_set_mem_control()
886 ret = -ENOMEM; in kvm_s390_set_mem_control()
888 gmap_remove(kvm->arch.gmap); in kvm_s390_set_mem_control()
889 new->private = kvm; in kvm_s390_set_mem_control()
890 kvm->arch.gmap = new; in kvm_s390_set_mem_control()
894 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
897 (void *) kvm->arch.gmap->asce); in kvm_s390_set_mem_control()
901 ret = -ENXIO; in kvm_s390_set_mem_control()
927 mutex_lock(&kvm->lock); in kvm_s390_vm_set_crypto()
928 switch (attr->attr) { in kvm_s390_vm_set_crypto()
931 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
932 return -EINVAL; in kvm_s390_vm_set_crypto()
935 kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_vm_set_crypto()
936 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
937 kvm->arch.crypto.aes_kw = 1; in kvm_s390_vm_set_crypto()
942 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
943 return -EINVAL; in kvm_s390_vm_set_crypto()
946 kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_vm_set_crypto()
947 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
948 kvm->arch.crypto.dea_kw = 1; in kvm_s390_vm_set_crypto()
953 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
954 return -EINVAL; in kvm_s390_vm_set_crypto()
956 kvm->arch.crypto.aes_kw = 0; in kvm_s390_vm_set_crypto()
957 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
958 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
963 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
964 return -EINVAL; in kvm_s390_vm_set_crypto()
966 kvm->arch.crypto.dea_kw = 0; in kvm_s390_vm_set_crypto()
967 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
968 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
973 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
974 return -EOPNOTSUPP; in kvm_s390_vm_set_crypto()
976 kvm->arch.crypto.apie = 1; in kvm_s390_vm_set_crypto()
980 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
981 return -EOPNOTSUPP; in kvm_s390_vm_set_crypto()
983 kvm->arch.crypto.apie = 0; in kvm_s390_vm_set_crypto()
986 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
987 return -ENXIO; in kvm_s390_vm_set_crypto()
991 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1005 * Must be called with kvm->srcu held to avoid races on memslots, and with
1006 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
1016 if (kvm->arch.migration_mode) in kvm_s390_vm_start_migration()
1019 if (!slots || !slots->used_slots) in kvm_s390_vm_start_migration()
1020 return -EINVAL; in kvm_s390_vm_start_migration()
1022 if (!kvm->arch.use_cmma) { in kvm_s390_vm_start_migration()
1023 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1027 for (slotnr = 0; slotnr < slots->used_slots; slotnr++) { in kvm_s390_vm_start_migration()
1028 ms = slots->memslots + slotnr; in kvm_s390_vm_start_migration()
1029 if (!ms->dirty_bitmap) in kvm_s390_vm_start_migration()
1030 return -EINVAL; in kvm_s390_vm_start_migration()
1038 ram_pages += ms->npages; in kvm_s390_vm_start_migration()
1040 atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages); in kvm_s390_vm_start_migration()
1041 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1047 * Must be called with kvm->slots_lock to avoid races with ourselves and
1053 if (!kvm->arch.migration_mode) in kvm_s390_vm_stop_migration()
1055 kvm->arch.migration_mode = 0; in kvm_s390_vm_stop_migration()
1056 if (kvm->arch.use_cmma) in kvm_s390_vm_stop_migration()
1064 int res = -ENXIO; in kvm_s390_vm_set_migration()
1066 mutex_lock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
1067 switch (attr->attr) { in kvm_s390_vm_set_migration()
1077 mutex_unlock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
1085 u64 mig = kvm->arch.migration_mode; in kvm_s390_vm_get_migration()
1087 if (attr->attr != KVM_S390_VM_MIGRATION_STATUS) in kvm_s390_vm_get_migration()
1088 return -ENXIO; in kvm_s390_vm_get_migration()
1090 if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig))) in kvm_s390_vm_get_migration()
1091 return -EFAULT; in kvm_s390_vm_get_migration()
1101 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod))) in kvm_s390_set_tod_ext()
1102 return -EFAULT; in kvm_s390_set_tod_ext()
1105 return -EINVAL; in kvm_s390_set_tod_ext()
1118 if (copy_from_user(&gtod_high, (void __user *)attr->addr, in kvm_s390_set_tod_high()
1120 return -EFAULT; in kvm_s390_set_tod_high()
1123 return -EINVAL; in kvm_s390_set_tod_high()
1133 if (copy_from_user(&gtod.tod, (void __user *)attr->addr, in kvm_s390_set_tod_low()
1135 return -EFAULT; in kvm_s390_set_tod_low()
1146 if (attr->flags) in kvm_s390_set_tod()
1147 return -EINVAL; in kvm_s390_set_tod()
1149 mutex_lock(&kvm->lock); in kvm_s390_set_tod()
1155 ret = -EOPNOTSUPP; in kvm_s390_set_tod()
1159 switch (attr->attr) { in kvm_s390_set_tod()
1170 ret = -ENXIO; in kvm_s390_set_tod()
1175 mutex_unlock(&kvm->lock); in kvm_s390_set_tod()
1188 gtod->tod = htod.tod + kvm->arch.epoch; in kvm_s390_get_tod_clock()
1189 gtod->epoch_idx = 0; in kvm_s390_get_tod_clock()
1191 gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx; in kvm_s390_get_tod_clock()
1192 if (gtod->tod < htod.tod) in kvm_s390_get_tod_clock()
1193 gtod->epoch_idx += 1; in kvm_s390_get_tod_clock()
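A compact model of the extended-TOD read above, assuming the multiple-epoch facility is in use: the guest clock is the host's 128-bit TOD plus the per-VM (epdx:epoch) offset, and a carry out of the low word bumps the epoch index. Standalone sketch only:

#include <stdint.h>

struct tod128 { uint64_t epoch_idx, tod; };

/* sketch: guest TOD = host TOD + (epdx:epoch), with low-word carry */
struct tod128 guest_tod(struct tod128 host, uint64_t epdx, uint64_t epoch)
{
	struct tod128 g;

	g.tod = host.tod + epoch;
	g.epoch_idx = host.epoch_idx + epdx;
	if (g.tod < host.tod)	/* carry out of the low 64 bits */
		g.epoch_idx += 1;
	return g;
}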
1205 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) in kvm_s390_get_tod_ext()
1206 return -EFAULT; in kvm_s390_get_tod_ext()
1208 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx", in kvm_s390_get_tod_ext()
1217 if (copy_to_user((void __user *)attr->addr, &gtod_high, in kvm_s390_get_tod_high()
1219 return -EFAULT; in kvm_s390_get_tod_high()
1220 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high); in kvm_s390_get_tod_high()
1230 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) in kvm_s390_get_tod_low()
1231 return -EFAULT; in kvm_s390_get_tod_low()
1232 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod); in kvm_s390_get_tod_low()
1241 if (attr->flags) in kvm_s390_get_tod()
1242 return -EINVAL; in kvm_s390_get_tod()
1244 switch (attr->attr) { in kvm_s390_get_tod()
1255 ret = -ENXIO; in kvm_s390_get_tod()
1267 mutex_lock(&kvm->lock); in kvm_s390_set_processor()
1268 if (kvm->created_vcpus) { in kvm_s390_set_processor()
1269 ret = -EBUSY; in kvm_s390_set_processor()
1274 ret = -ENOMEM; in kvm_s390_set_processor()
1277 if (!copy_from_user(proc, (void __user *)attr->addr, in kvm_s390_set_processor()
1279 kvm->arch.model.cpuid = proc->cpuid; in kvm_s390_set_processor()
1282 if (lowest_ibc && proc->ibc) { in kvm_s390_set_processor()
1283 if (proc->ibc > unblocked_ibc) in kvm_s390_set_processor()
1284 kvm->arch.model.ibc = unblocked_ibc; in kvm_s390_set_processor()
1285 else if (proc->ibc < lowest_ibc) in kvm_s390_set_processor()
1286 kvm->arch.model.ibc = lowest_ibc; in kvm_s390_set_processor()
1288 kvm->arch.model.ibc = proc->ibc; in kvm_s390_set_processor()
1290 memcpy(kvm->arch.model.fac_list, proc->fac_list, in kvm_s390_set_processor()
1293 kvm->arch.model.ibc, in kvm_s390_set_processor()
1294 kvm->arch.model.cpuid); in kvm_s390_set_processor()
1296 kvm->arch.model.fac_list[0], in kvm_s390_set_processor()
1297 kvm->arch.model.fac_list[1], in kvm_s390_set_processor()
1298 kvm->arch.model.fac_list[2]); in kvm_s390_set_processor()
1300 ret = -EFAULT; in kvm_s390_set_processor()
1303 mutex_unlock(&kvm->lock); in kvm_s390_set_processor()
1312 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data))) in kvm_s390_set_processor_feat()
1313 return -EFAULT; in kvm_s390_set_processor_feat()
1317 return -EINVAL; in kvm_s390_set_processor_feat()
1319 mutex_lock(&kvm->lock); in kvm_s390_set_processor_feat()
1320 if (kvm->created_vcpus) { in kvm_s390_set_processor_feat()
1321 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1322 return -EBUSY; in kvm_s390_set_processor_feat()
1324 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat, in kvm_s390_set_processor_feat()
1326 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1337 mutex_lock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1338 if (kvm->created_vcpus) { in kvm_s390_set_processor_subfunc()
1339 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1340 return -EBUSY; in kvm_s390_set_processor_subfunc()
1343 if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr, in kvm_s390_set_processor_subfunc()
1345 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1346 return -EFAULT; in kvm_s390_set_processor_subfunc()
1348 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1351 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_set_processor_subfunc()
1352 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_set_processor_subfunc()
1353 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_set_processor_subfunc()
1354 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_set_processor_subfunc()
1356 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_set_processor_subfunc()
1357 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_set_processor_subfunc()
1359 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_set_processor_subfunc()
1360 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_set_processor_subfunc()
1362 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_set_processor_subfunc()
1363 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_set_processor_subfunc()
1365 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_set_processor_subfunc()
1366 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_set_processor_subfunc()
1368 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_set_processor_subfunc()
1369 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_set_processor_subfunc()
1371 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_set_processor_subfunc()
1372 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_set_processor_subfunc()
1374 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_set_processor_subfunc()
1375 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_set_processor_subfunc()
1377 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_set_processor_subfunc()
1378 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_set_processor_subfunc()
1380 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_set_processor_subfunc()
1381 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_set_processor_subfunc()
1383 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_set_processor_subfunc()
1384 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_set_processor_subfunc()
1386 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_set_processor_subfunc()
1387 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_set_processor_subfunc()
1389 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_set_processor_subfunc()
1390 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_set_processor_subfunc()
1392 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_set_processor_subfunc()
1393 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_set_processor_subfunc()
1395 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_set_processor_subfunc()
1396 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_set_processor_subfunc()
1398 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_set_processor_subfunc()
1399 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_set_processor_subfunc()
1400 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_set_processor_subfunc()
1401 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_set_processor_subfunc()
1403 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_set_processor_subfunc()
1404 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_set_processor_subfunc()
1405 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_set_processor_subfunc()
1406 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_set_processor_subfunc()
1413 int ret = -ENXIO; in kvm_s390_set_cpu_model()
1415 switch (attr->attr) { in kvm_s390_set_cpu_model()
1436 ret = -ENOMEM; in kvm_s390_get_processor()
1439 proc->cpuid = kvm->arch.model.cpuid; in kvm_s390_get_processor()
1440 proc->ibc = kvm->arch.model.ibc; in kvm_s390_get_processor()
1441 memcpy(&proc->fac_list, kvm->arch.model.fac_list, in kvm_s390_get_processor()
1444 kvm->arch.model.ibc, in kvm_s390_get_processor()
1445 kvm->arch.model.cpuid); in kvm_s390_get_processor()
1447 kvm->arch.model.fac_list[0], in kvm_s390_get_processor()
1448 kvm->arch.model.fac_list[1], in kvm_s390_get_processor()
1449 kvm->arch.model.fac_list[2]); in kvm_s390_get_processor()
1450 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc))) in kvm_s390_get_processor()
1451 ret = -EFAULT; in kvm_s390_get_processor()
1464 ret = -ENOMEM; in kvm_s390_get_machine()
1467 get_cpu_id((struct cpuid *) &mach->cpuid); in kvm_s390_get_machine()
1468 mach->ibc = sclp.ibc; in kvm_s390_get_machine()
1469 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask, in kvm_s390_get_machine()
1471 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list, in kvm_s390_get_machine()
1474 kvm->arch.model.ibc, in kvm_s390_get_machine()
1475 kvm->arch.model.cpuid); in kvm_s390_get_machine()
1477 mach->fac_mask[0], in kvm_s390_get_machine()
1478 mach->fac_mask[1], in kvm_s390_get_machine()
1479 mach->fac_mask[2]); in kvm_s390_get_machine()
1481 mach->fac_list[0], in kvm_s390_get_machine()
1482 mach->fac_list[1], in kvm_s390_get_machine()
1483 mach->fac_list[2]); in kvm_s390_get_machine()
1484 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) in kvm_s390_get_machine()
1485 ret = -EFAULT; in kvm_s390_get_machine()
1496 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat, in kvm_s390_get_processor_feat()
1498 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data))) in kvm_s390_get_processor_feat()
1499 return -EFAULT; in kvm_s390_get_processor_feat()
1515 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data))) in kvm_s390_get_machine_feat()
1516 return -EFAULT; in kvm_s390_get_machine_feat()
1527 if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs, in kvm_s390_get_processor_subfunc()
1529 return -EFAULT; in kvm_s390_get_processor_subfunc()
1532 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_get_processor_subfunc()
1533 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_get_processor_subfunc()
1534 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_get_processor_subfunc()
1535 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_get_processor_subfunc()
1537 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_get_processor_subfunc()
1538 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_get_processor_subfunc()
1540 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_get_processor_subfunc()
1541 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_get_processor_subfunc()
1543 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_get_processor_subfunc()
1544 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_get_processor_subfunc()
1546 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_get_processor_subfunc()
1547 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_get_processor_subfunc()
1549 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_get_processor_subfunc()
1550 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_get_processor_subfunc()
1552 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_get_processor_subfunc()
1553 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_get_processor_subfunc()
1555 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_get_processor_subfunc()
1556 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_get_processor_subfunc()
1558 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_get_processor_subfunc()
1559 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_get_processor_subfunc()
1561 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_get_processor_subfunc()
1562 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_get_processor_subfunc()
1564 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_get_processor_subfunc()
1565 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_get_processor_subfunc()
1567 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_get_processor_subfunc()
1568 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_get_processor_subfunc()
1570 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_get_processor_subfunc()
1571 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_get_processor_subfunc()
1573 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_get_processor_subfunc()
1574 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_get_processor_subfunc()
1576 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_get_processor_subfunc()
1577 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_get_processor_subfunc()
1579 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_get_processor_subfunc()
1580 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_get_processor_subfunc()
1581 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_get_processor_subfunc()
1582 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_get_processor_subfunc()
1584 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_get_processor_subfunc()
1585 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_get_processor_subfunc()
1586 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_get_processor_subfunc()
1587 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_get_processor_subfunc()
1595 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc, in kvm_s390_get_machine_subfunc()
1597 return -EFAULT; in kvm_s390_get_machine_subfunc()
1662 int ret = -ENXIO; in kvm_s390_get_cpu_model()
1664 switch (attr->attr) { in kvm_s390_get_cpu_model()
1691 switch (attr->group) { in kvm_s390_vm_set_attr()
1708 ret = -ENXIO; in kvm_s390_vm_set_attr()
1719 switch (attr->group) { in kvm_s390_vm_get_attr()
1733 ret = -ENXIO; in kvm_s390_vm_get_attr()
1744 switch (attr->group) { in kvm_s390_vm_has_attr()
1746 switch (attr->attr) { in kvm_s390_vm_has_attr()
1749 ret = sclp.has_cmma ? 0 : -ENXIO; in kvm_s390_vm_has_attr()
1755 ret = -ENXIO; in kvm_s390_vm_has_attr()
1760 switch (attr->attr) { in kvm_s390_vm_has_attr()
1766 ret = -ENXIO; in kvm_s390_vm_has_attr()
1771 switch (attr->attr) { in kvm_s390_vm_has_attr()
1781 ret = -ENXIO; in kvm_s390_vm_has_attr()
1786 switch (attr->attr) { in kvm_s390_vm_has_attr()
1795 ret = ap_instructions_available() ? 0 : -ENXIO; in kvm_s390_vm_has_attr()
1798 ret = -ENXIO; in kvm_s390_vm_has_attr()
1806 ret = -ENXIO; in kvm_s390_vm_has_attr()
1819 if (args->flags != 0) in kvm_s390_get_skeys()
1820 return -EINVAL; in kvm_s390_get_skeys()
1823 if (!mm_uses_skeys(current->mm)) in kvm_s390_get_skeys()
1827 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) in kvm_s390_get_skeys()
1828 return -EINVAL; in kvm_s390_get_skeys()
1830 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL); in kvm_s390_get_skeys()
1832 return -ENOMEM; in kvm_s390_get_skeys()
1834 mmap_read_lock(current->mm); in kvm_s390_get_skeys()
1835 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_skeys()
1836 for (i = 0; i < args->count; i++) { in kvm_s390_get_skeys()
1837 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_get_skeys()
1839 r = -EFAULT; in kvm_s390_get_skeys()
1843 r = get_guest_storage_key(current->mm, hva, &keys[i]); in kvm_s390_get_skeys()
1847 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_skeys()
1848 mmap_read_unlock(current->mm); in kvm_s390_get_skeys()
1851 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys, in kvm_s390_get_skeys()
1852 sizeof(uint8_t) * args->count); in kvm_s390_get_skeys()
1854 r = -EFAULT; in kvm_s390_get_skeys()
1868 if (args->flags != 0) in kvm_s390_set_skeys()
1869 return -EINVAL; in kvm_s390_set_skeys()
1872 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) in kvm_s390_set_skeys()
1873 return -EINVAL; in kvm_s390_set_skeys()
1875 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL); in kvm_s390_set_skeys()
1877 return -ENOMEM; in kvm_s390_set_skeys()
1879 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr, in kvm_s390_set_skeys()
1880 sizeof(uint8_t) * args->count); in kvm_s390_set_skeys()
1882 r = -EFAULT; in kvm_s390_set_skeys()
1892 mmap_read_lock(current->mm); in kvm_s390_set_skeys()
1893 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_skeys()
1894 while (i < args->count) { in kvm_s390_set_skeys()
1896 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_skeys()
1898 r = -EFAULT; in kvm_s390_set_skeys()
1904 r = -EINVAL; in kvm_s390_set_skeys()
1908 r = set_guest_storage_key(current->mm, hva, keys[i], 0); in kvm_s390_set_skeys()
1910 r = fixup_user_fault(current->mm, hva, in kvm_s390_set_skeys()
1918 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_skeys()
1919 mmap_read_unlock(current->mm); in kvm_s390_set_skeys()
1941 int start = 0, end = slots->used_slots; in gfn_to_memslot_approx()
1942 int slot = atomic_read(&slots->lru_slot); in gfn_to_memslot_approx()
1943 struct kvm_memory_slot *memslots = slots->memslots; in gfn_to_memslot_approx()
1950 slot = start + (end - start) / 2; in gfn_to_memslot_approx()
1958 if (start >= slots->used_slots) in gfn_to_memslot_approx()
1959 return slots->used_slots - 1; in gfn_to_memslot_approx()
1963 atomic_set(&slots->lru_slot, start); in gfn_to_memslot_approx()
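The loop above is a binary search over memslots kept sorted by base_gfn in descending order; unlike an exact lookup, this "approx" variant falls back to the nearest slot rather than failing. A portable sketch under that ordering assumption:

#include <stdint.h>

struct slot { uint64_t base_gfn, npages; };

/* sketch: index of the slot covering gfn, or the nearest slot on a miss */
int slot_approx(const struct slot *s, int used, uint64_t gfn)
{
	int start = 0, end = used;

	while (start < end) {
		int mid = start + (end - start) / 2;

		if (gfn >= s[mid].base_gfn)
			end = mid;	/* slots sorted by base_gfn, descending */
		else
			start = mid + 1;
	}
	return start < used ? start : used - 1;
}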
1972 unsigned long pgstev, hva, cur_gfn = args->start_gfn; in kvm_s390_peek_cmma()
1974 args->count = 0; in kvm_s390_peek_cmma()
1975 while (args->count < bufsize) { in kvm_s390_peek_cmma()
1982 return args->count ? 0 : -EFAULT; in kvm_s390_peek_cmma()
1983 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_peek_cmma()
1985 res[args->count++] = (pgstev >> 24) & 0x43; in kvm_s390_peek_cmma()
1996 struct kvm_memory_slot *ms = slots->memslots + slotidx; in kvm_s390_next_dirty_cmma()
1997 unsigned long ofs = cur_gfn - ms->base_gfn; in kvm_s390_next_dirty_cmma()
1999 if (ms->base_gfn + ms->npages <= cur_gfn) { in kvm_s390_next_dirty_cmma()
2000 slotidx--; in kvm_s390_next_dirty_cmma()
2003 slotidx = slots->used_slots - 1; in kvm_s390_next_dirty_cmma()
2005 ms = slots->memslots + slotidx; in kvm_s390_next_dirty_cmma()
2009 if (cur_gfn < ms->base_gfn) in kvm_s390_next_dirty_cmma()
2012 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs); in kvm_s390_next_dirty_cmma()
2013 while ((slotidx > 0) && (ofs >= ms->npages)) { in kvm_s390_next_dirty_cmma()
2014 slotidx--; in kvm_s390_next_dirty_cmma()
2015 ms = slots->memslots + slotidx; in kvm_s390_next_dirty_cmma()
2016 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0); in kvm_s390_next_dirty_cmma()
2018 return ms->base_gfn + ofs; in kvm_s390_next_dirty_cmma()
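find_next_bit() here scans the per-slot migration ("second") dirty bitmap for the next set bit at or after an offset, returning the bitmap size when none is found. A simplified stand-in with the same semantics, not the kernel's word-at-a-time version:

/* sketch: first set bit >= start in a little-endian bitmap, or 'size' if none */
unsigned long find_next_bit_simple(const unsigned long *map,
				   unsigned long size, unsigned long start)
{
	const unsigned long bpl = 8 * sizeof(unsigned long);
	unsigned long i;

	for (i = start; i < size; i++)
		if (map[i / bpl] & (1UL << (i % bpl)))
			return i;
	return size;
}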
2028 if (unlikely(!slots->used_slots)) in kvm_s390_get_cmma()
2031 cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn); in kvm_s390_get_cmma()
2033 args->count = 0; in kvm_s390_get_cmma()
2034 args->start_gfn = cur_gfn; in kvm_s390_get_cmma()
2038 mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages; in kvm_s390_get_cmma()
2040 while (args->count < bufsize) { in kvm_s390_get_cmma()
2045 if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms))) in kvm_s390_get_cmma()
2046 atomic64_dec(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma()
2047 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_get_cmma()
2050 res[args->count++] = (pgstev >> 24) & 0x43; in kvm_s390_get_cmma()
2059 (next_gfn - args->start_gfn >= bufsize)) in kvm_s390_get_cmma()
2063 if (cur_gfn - ms->base_gfn >= ms->npages) { in kvm_s390_get_cmma()
2087 if (!kvm->arch.use_cmma) in kvm_s390_get_cmma_bits()
2088 return -ENXIO; in kvm_s390_get_cmma_bits()
2090 if (args->flags & ~KVM_S390_CMMA_PEEK) in kvm_s390_get_cmma_bits()
2091 return -EINVAL; in kvm_s390_get_cmma_bits()
2092 /* Migration mode query, and we are not doing a migration */ in kvm_s390_get_cmma_bits()
2093 peek = !!(args->flags & KVM_S390_CMMA_PEEK); in kvm_s390_get_cmma_bits()
2094 if (!peek && !kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2095 return -EINVAL; in kvm_s390_get_cmma_bits()
2097 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX); in kvm_s390_get_cmma_bits()
2098 if (!bufsize || !kvm->mm->context.uses_cmm) { in kvm_s390_get_cmma_bits()
2103 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) { in kvm_s390_get_cmma_bits()
2110 return -ENOMEM; in kvm_s390_get_cmma_bits()
2112 mmap_read_lock(kvm->mm); in kvm_s390_get_cmma_bits()
2113 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_cmma_bits()
2118 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_cmma_bits()
2119 mmap_read_unlock(kvm->mm); in kvm_s390_get_cmma_bits()
2121 if (kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2122 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma_bits()
2124 args->remaining = 0; in kvm_s390_get_cmma_bits()
2126 if (copy_to_user((void __user *)args->values, values, args->count)) in kvm_s390_get_cmma_bits()
2127 ret = -EFAULT; in kvm_s390_get_cmma_bits()
2136 * set and the mm->context.uses_cmm flag is set.
2145 mask = args->mask; in kvm_s390_set_cmma_bits()
2147 if (!kvm->arch.use_cmma) in kvm_s390_set_cmma_bits()
2148 return -ENXIO; in kvm_s390_set_cmma_bits()
2150 if (args->flags != 0) in kvm_s390_set_cmma_bits()
2151 return -EINVAL; in kvm_s390_set_cmma_bits()
2153 if (args->count > KVM_S390_CMMA_SIZE_MAX) in kvm_s390_set_cmma_bits()
2154 return -EINVAL; in kvm_s390_set_cmma_bits()
2156 if (args->count == 0) in kvm_s390_set_cmma_bits()
2159 bits = vmalloc(array_size(sizeof(*bits), args->count)); in kvm_s390_set_cmma_bits()
2161 return -ENOMEM; in kvm_s390_set_cmma_bits()
2163 r = copy_from_user(bits, (void __user *)args->values, args->count); in kvm_s390_set_cmma_bits()
2165 r = -EFAULT; in kvm_s390_set_cmma_bits()
2169 mmap_read_lock(kvm->mm); in kvm_s390_set_cmma_bits()
2170 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_cmma_bits()
2171 for (i = 0; i < args->count; i++) { in kvm_s390_set_cmma_bits()
2172 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_cmma_bits()
2174 r = -EFAULT; in kvm_s390_set_cmma_bits()
2181 set_pgste_bits(kvm->mm, hva, mask, pgstev); in kvm_s390_set_cmma_bits()
2183 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_cmma_bits()
2184 mmap_read_unlock(kvm->mm); in kvm_s390_set_cmma_bits()
2186 if (!kvm->mm->context.uses_cmm) { in kvm_s390_set_cmma_bits()
2187 mmap_write_lock(kvm->mm); in kvm_s390_set_cmma_bits()
2188 kvm->mm->context.uses_cmm = 1; in kvm_s390_set_cmma_bits()
2189 mmap_write_unlock(kvm->mm); in kvm_s390_set_cmma_bits()
2212 mutex_lock(&vcpu->mutex); in kvm_s390_cpus_from_pv()
2216 ret = -EIO; in kvm_s390_cpus_from_pv()
2218 mutex_unlock(&vcpu->mutex); in kvm_s390_cpus_from_pv()
2231 mutex_lock(&vcpu->mutex); in kvm_s390_cpus_to_pv()
2233 mutex_unlock(&vcpu->mutex); in kvm_s390_cpus_to_pv()
2246 void __user *argp = (void __user *)cmd->data; in kvm_s390_handle_pv()
2248 switch (cmd->cmd) { in kvm_s390_handle_pv()
2250 r = -EINVAL; in kvm_s390_handle_pv()
2262 mmap_write_lock(current->mm); in kvm_s390_handle_pv()
2264 mmap_write_unlock(current->mm); in kvm_s390_handle_pv()
2268 r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2272 r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2277 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2281 r = -EINVAL; in kvm_s390_handle_pv()
2285 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2293 r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2296 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2303 r = -EINVAL; in kvm_s390_handle_pv()
2307 r = -EFAULT; in kvm_s390_handle_pv()
2312 r = -EINVAL; in kvm_s390_handle_pv()
2316 r = -ENOMEM; in kvm_s390_handle_pv()
2321 r = -EFAULT; in kvm_s390_handle_pv()
2325 &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2333 r = -EINVAL; in kvm_s390_handle_pv()
2334 if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm)) in kvm_s390_handle_pv()
2337 r = -EFAULT; in kvm_s390_handle_pv()
2342 &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2346 r = -EINVAL; in kvm_s390_handle_pv()
2351 UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2352 KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc, in kvm_s390_handle_pv()
2353 cmd->rrc); in kvm_s390_handle_pv()
2357 r = -EINVAL; in kvm_s390_handle_pv()
2362 UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2364 cmd->rc, cmd->rrc); in kvm_s390_handle_pv()
2368 r = -EINVAL; in kvm_s390_handle_pv()
2373 UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2375 cmd->rc, cmd->rrc); in kvm_s390_handle_pv()
2379 r = -ENOTTY; in kvm_s390_handle_pv()
2387 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
2396 r = -EFAULT; in kvm_arch_vm_ioctl()
2405 r = -EINVAL; in kvm_arch_vm_ioctl()
2406 if (kvm->arch.use_irqchip) { in kvm_arch_vm_ioctl()
2414 r = -EFAULT; in kvm_arch_vm_ioctl()
2421 r = -EFAULT; in kvm_arch_vm_ioctl()
2428 r = -EFAULT; in kvm_arch_vm_ioctl()
2437 r = -EFAULT; in kvm_arch_vm_ioctl()
2447 r = -EFAULT; in kvm_arch_vm_ioctl()
2457 r = -EFAULT; in kvm_arch_vm_ioctl()
2460 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
2462 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
2466 r = -EFAULT; in kvm_arch_vm_ioctl()
2473 r = -EFAULT; in kvm_arch_vm_ioctl()
2476 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
2478 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
2485 kvm->arch.user_cpu_state_ctrl = 1; in kvm_arch_vm_ioctl()
2488 r = -EINVAL; in kvm_arch_vm_ioctl()
2492 r = -EFAULT; in kvm_arch_vm_ioctl()
2496 r = -EINVAL; in kvm_arch_vm_ioctl()
2499 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
2501 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
2503 r = -EFAULT; in kvm_arch_vm_ioctl()
2509 r = -ENOTTY; in kvm_arch_vm_ioctl()
2537 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb; in kvm_s390_set_crycb_format()
2539 /* Clear the CRYCB format bits - i.e., set format 0 by default */ in kvm_s390_set_crycb_format()
2540 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK); in kvm_s390_set_crycb_format()
2547 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; in kvm_s390_set_crycb_format()
2549 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; in kvm_s390_set_crycb_format()
2555 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb; in kvm_arch_crypto_set_masks()
2557 mutex_lock(&kvm->lock); in kvm_arch_crypto_set_masks()
2560 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) { in kvm_arch_crypto_set_masks()
2562 memcpy(crycb->apcb1.apm, apm, 32); in kvm_arch_crypto_set_masks()
2565 memcpy(crycb->apcb1.aqm, aqm, 32); in kvm_arch_crypto_set_masks()
2568 memcpy(crycb->apcb1.adm, adm, 32); in kvm_arch_crypto_set_masks()
2574 memcpy(crycb->apcb0.apm, apm, 8); in kvm_arch_crypto_set_masks()
2575 memcpy(crycb->apcb0.aqm, aqm, 2); in kvm_arch_crypto_set_masks()
2576 memcpy(crycb->apcb0.adm, adm, 2); in kvm_arch_crypto_set_masks()
2588 mutex_unlock(&kvm->lock); in kvm_arch_crypto_set_masks()
2594 mutex_lock(&kvm->lock); in kvm_arch_crypto_clear_masks()
2597 memset(&kvm->arch.crypto.crycb->apcb0, 0, in kvm_arch_crypto_clear_masks()
2598 sizeof(kvm->arch.crypto.crycb->apcb0)); in kvm_arch_crypto_clear_masks()
2599 memset(&kvm->arch.crypto.crycb->apcb1, 0, in kvm_arch_crypto_clear_masks()
2600 sizeof(kvm->arch.crypto.crycb->apcb1)); in kvm_arch_crypto_clear_masks()
2606 mutex_unlock(&kvm->lock); in kvm_arch_crypto_clear_masks()
2621 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb; in kvm_s390_crypto_init()
2628 kvm->arch.crypto.aes_kw = 1; in kvm_s390_crypto_init()
2629 kvm->arch.crypto.dea_kw = 1; in kvm_s390_crypto_init()
2630 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_crypto_init()
2631 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_crypto_init()
2632 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_crypto_init()
2633 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_crypto_init()
2638 if (kvm->arch.use_esca) in sca_dispose()
2639 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block)); in sca_dispose()
2641 free_page((unsigned long)(kvm->arch.sca)); in sca_dispose()
2642 kvm->arch.sca = NULL; in sca_dispose()
2652 rc = -EINVAL; in kvm_arch_init_vm()
2667 rc = -ENOMEM; in kvm_arch_init_vm()
2671 rwlock_init(&kvm->arch.sca_lock); in kvm_arch_init_vm()
2673 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags); in kvm_arch_init_vm()
2674 if (!kvm->arch.sca) in kvm_arch_init_vm()
2680 kvm->arch.sca = (struct bsca_block *) in kvm_arch_init_vm()
2681 ((char *) kvm->arch.sca + sca_offset); in kvm_arch_init_vm()
2684 sprintf(debug_name, "kvm-%u", current->pid); in kvm_arch_init_vm()
2686 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long)); in kvm_arch_init_vm()
2687 if (!kvm->arch.dbf) in kvm_arch_init_vm()
2691 kvm->arch.sie_page2 = in kvm_arch_init_vm()
2693 if (!kvm->arch.sie_page2) in kvm_arch_init_vm()
2696 kvm->arch.sie_page2->kvm = kvm; in kvm_arch_init_vm()
2697 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list; in kvm_arch_init_vm()
2700 kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] & in kvm_arch_init_vm()
2703 kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] & in kvm_arch_init_vm()
2706 kvm->arch.model.subfuncs = kvm_s390_available_subfunc; in kvm_arch_init_vm()
2708 /* we are always in czam mode - even on pre z14 machines */ in kvm_arch_init_vm()
2709 set_kvm_facility(kvm->arch.model.fac_mask, 138); in kvm_arch_init_vm()
2710 set_kvm_facility(kvm->arch.model.fac_list, 138); in kvm_arch_init_vm()
2712 set_kvm_facility(kvm->arch.model.fac_mask, 74); in kvm_arch_init_vm()
2713 set_kvm_facility(kvm->arch.model.fac_list, 74); in kvm_arch_init_vm()
2715 set_kvm_facility(kvm->arch.model.fac_mask, 147); in kvm_arch_init_vm()
2716 set_kvm_facility(kvm->arch.model.fac_list, 147); in kvm_arch_init_vm()
2720 set_kvm_facility(kvm->arch.model.fac_mask, 65); in kvm_arch_init_vm()
2722 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid(); in kvm_arch_init_vm()
2723 kvm->arch.model.ibc = sclp.ibc & 0x0fff; in kvm_arch_init_vm()
2727 mutex_init(&kvm->arch.float_int.ais_lock); in kvm_arch_init_vm()
2728 spin_lock_init(&kvm->arch.float_int.lock); in kvm_arch_init_vm()
2730 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); in kvm_arch_init_vm()
2731 init_waitqueue_head(&kvm->arch.ipte_wq); in kvm_arch_init_vm()
2732 mutex_init(&kvm->arch.ipte_mutex); in kvm_arch_init_vm()
2734 debug_register_view(kvm->arch.dbf, &debug_sprintf_view); in kvm_arch_init_vm()
2738 kvm->arch.gmap = NULL; in kvm_arch_init_vm()
2739 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT; in kvm_arch_init_vm()
2742 kvm->arch.mem_limit = TASK_SIZE_MAX; in kvm_arch_init_vm()
2744 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX, in kvm_arch_init_vm()
2746 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1); in kvm_arch_init_vm()
2747 if (!kvm->arch.gmap) in kvm_arch_init_vm()
2749 kvm->arch.gmap->private = kvm; in kvm_arch_init_vm()
2750 kvm->arch.gmap->pfault_enabled = 0; in kvm_arch_init_vm()
2753 kvm->arch.use_pfmfi = sclp.has_pfmfi; in kvm_arch_init_vm()
2754 kvm->arch.use_skf = sclp.has_skey; in kvm_arch_init_vm()
2755 spin_lock_init(&kvm->arch.start_stop_lock); in kvm_arch_init_vm()
2759 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid); in kvm_arch_init_vm()
2763 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_init_vm()
2764 debug_unregister(kvm->arch.dbf); in kvm_arch_init_vm()
2775 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id); in kvm_arch_vcpu_destroy()
2778 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
2781 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
2782 gmap_remove(vcpu->arch.gmap); in kvm_arch_vcpu_destroy()
2784 if (vcpu->kvm->arch.use_cmma) in kvm_arch_vcpu_destroy()
2789 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_destroy()
2800 mutex_lock(&kvm->lock); in kvm_free_vcpus()
2801 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) in kvm_free_vcpus()
2802 kvm->vcpus[i] = NULL; in kvm_free_vcpus()
2804 atomic_set(&kvm->online_vcpus, 0); in kvm_free_vcpus()
2805 mutex_unlock(&kvm->lock); in kvm_free_vcpus()
2816 * We are already at the end of life and kvm->lock is not taken. in kvm_arch_destroy_vm()
2823 debug_unregister(kvm->arch.dbf); in kvm_arch_destroy_vm()
2824 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_destroy_vm()
2826 gmap_remove(kvm->arch.gmap); in kvm_arch_destroy_vm()
2836 vcpu->arch.gmap = gmap_create(current->mm, -1UL); in __kvm_ucontrol_vcpu_init()
2837 if (!vcpu->arch.gmap) in __kvm_ucontrol_vcpu_init()
2838 return -ENOMEM; in __kvm_ucontrol_vcpu_init()
2839 vcpu->arch.gmap->private = vcpu->kvm; in __kvm_ucontrol_vcpu_init()
2848 read_lock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
2849 if (vcpu->kvm->arch.use_esca) { in sca_del_vcpu()
2850 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
2852 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn); in sca_del_vcpu()
2853 sca->cpu[vcpu->vcpu_id].sda = 0; in sca_del_vcpu()
2855 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
2857 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn); in sca_del_vcpu()
2858 sca->cpu[vcpu->vcpu_id].sda = 0; in sca_del_vcpu()
2860 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
2866 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
2869 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); in sca_add_vcpu()
2870 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca; in sca_add_vcpu()
2873 read_lock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
2874 if (vcpu->kvm->arch.use_esca) { in sca_add_vcpu()
2875 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
2877 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block; in sca_add_vcpu()
2878 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); in sca_add_vcpu()
2879 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU; in sca_add_vcpu()
2880 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; in sca_add_vcpu()
2881 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn); in sca_add_vcpu()
2883 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
2885 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block; in sca_add_vcpu()
2886 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); in sca_add_vcpu()
2887 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca; in sca_add_vcpu()
2888 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn); in sca_add_vcpu()
2890 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
2896 d->sda = s->sda; in sca_copy_entry()
2897 d->sigp_ctrl.c = s->sigp_ctrl.c; in sca_copy_entry()
2898 d->sigp_ctrl.scn = s->sigp_ctrl.scn; in sca_copy_entry()
2905 d->ipte_control = s->ipte_control; in sca_copy_b_to_e()
2906 d->mcn[0] = s->mcn; in sca_copy_b_to_e()
2908 sca_copy_entry(&d->cpu[i], &s->cpu[i]); in sca_copy_b_to_e()
2913 struct bsca_block *old_sca = kvm->arch.sca; in sca_switch_to_extended()
2919 if (kvm->arch.use_esca) in sca_switch_to_extended()
2924 return -ENOMEM; in sca_switch_to_extended()
2930 write_lock(&kvm->arch.sca_lock); in sca_switch_to_extended()
2935 vcpu->arch.sie_block->scaoh = scaoh; in sca_switch_to_extended()
2936 vcpu->arch.sie_block->scaol = scaol; in sca_switch_to_extended()
2937 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; in sca_switch_to_extended()
2939 kvm->arch.sca = new_sca; in sca_switch_to_extended()
2940 kvm->arch.use_esca = 1; in sca_switch_to_extended()
2942 write_unlock(&kvm->arch.sca_lock); in sca_switch_to_extended()
2947 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)", in sca_switch_to_extended()
2948 old_sca, kvm->arch.sca); in sca_switch_to_extended()
2966 mutex_lock(&kvm->lock); in sca_can_add_vcpu()
2967 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm); in sca_can_add_vcpu()
2968 mutex_unlock(&kvm->lock); in sca_can_add_vcpu()
2976 WARN_ON_ONCE(vcpu->arch.cputm_start != 0); in __start_cpu_timer_accounting()
2977 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in __start_cpu_timer_accounting()
2978 vcpu->arch.cputm_start = get_tod_clock_fast(); in __start_cpu_timer_accounting()
2979 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in __start_cpu_timer_accounting()
2985 WARN_ON_ONCE(vcpu->arch.cputm_start == 0); in __stop_cpu_timer_accounting()
2986 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in __stop_cpu_timer_accounting()
2987 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start; in __stop_cpu_timer_accounting()
2988 vcpu->arch.cputm_start = 0; in __stop_cpu_timer_accounting()
2989 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in __stop_cpu_timer_accounting()
2995 WARN_ON_ONCE(vcpu->arch.cputm_enabled); in __enable_cpu_timer_accounting()
2996 vcpu->arch.cputm_enabled = true; in __enable_cpu_timer_accounting()
3003 WARN_ON_ONCE(!vcpu->arch.cputm_enabled); in __disable_cpu_timer_accounting()
3005 vcpu->arch.cputm_enabled = false; in __disable_cpu_timer_accounting()
3022 /* set the cpu timer - may only be called from the VCPU thread itself */
3026 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in kvm_s390_set_cpu_timer()
3027 if (vcpu->arch.cputm_enabled) in kvm_s390_set_cpu_timer()
3028 vcpu->arch.cputm_start = get_tod_clock_fast(); in kvm_s390_set_cpu_timer()
3029 vcpu->arch.sie_block->cputm = cputm; in kvm_s390_set_cpu_timer()
3030 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in kvm_s390_set_cpu_timer()
3034 /* update and get the cpu timer - can also be called from other VCPU threads */
3040 if (unlikely(!vcpu->arch.cputm_enabled)) in kvm_s390_get_cpu_timer()
3041 return vcpu->arch.sie_block->cputm; in kvm_s390_get_cpu_timer()
3045 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount); in kvm_s390_get_cpu_timer()
3050 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu); in kvm_s390_get_cpu_timer()
3051 value = vcpu->arch.sie_block->cputm; in kvm_s390_get_cpu_timer()
3053 if (likely(vcpu->arch.cputm_start)) in kvm_s390_get_cpu_timer()
3054 value -= get_tod_clock_fast() - vcpu->arch.cputm_start; in kvm_s390_get_cpu_timer()
3055 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1)); in kvm_s390_get_cpu_timer()
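The loop above is the seqcount reader pattern: sample the sequence, read the timer fields, and retry if a writer was active or the count changed; masking out the low bit (seq & ~1) lets the VCPU thread read inside its own write-side critical section. A single-writer model in plain C (hedged: real code relies on the seqcount API's memory barriers, omitted here):

#include <stdint.h>

struct cputm_state {
	volatile unsigned int seq;	/* odd while an update is in flight */
	uint64_t cputm;			/* remaining guest CPU timer */
	uint64_t start;			/* TOD when accounting started, 0 = stopped */
};

/* sketch: lockless CPU-timer read against a concurrent single writer */
uint64_t read_cputm(const struct cputm_state *s, uint64_t tod_now)
{
	unsigned int seq;
	uint64_t value;

	do {
		seq = s->seq;
		value = s->cputm;
		if (s->start)	/* accounting running: subtract elapsed time */
			value -= tod_now - s->start;
	} while ((seq & 1) || s->seq != seq);
	return value;
}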
3063 gmap_enable(vcpu->arch.enabled_gmap); in kvm_arch_vcpu_load()
3065 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) in kvm_arch_vcpu_load()
3067 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
3072 vcpu->cpu = -1; in kvm_arch_vcpu_put()
3073 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) in kvm_arch_vcpu_put()
3076 vcpu->arch.enabled_gmap = gmap_get_enabled(); in kvm_arch_vcpu_put()
3077 gmap_disable(vcpu->arch.enabled_gmap); in kvm_arch_vcpu_put()
3083 mutex_lock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
3085 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; in kvm_arch_vcpu_postcreate()
3086 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx; in kvm_arch_vcpu_postcreate()
3088 mutex_unlock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
3089 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_postcreate()
3090 vcpu->arch.gmap = vcpu->kvm->arch.gmap; in kvm_arch_vcpu_postcreate()
3093 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0) in kvm_arch_vcpu_postcreate()
3094 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC; in kvm_arch_vcpu_postcreate()
3096 vcpu->arch.enabled_gmap = vcpu->arch.gmap; in kvm_arch_vcpu_postcreate()
3101 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) && in kvm_has_pckmo_subfunc()
3124 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76)) in kvm_s390_vcpu_crypto_setup()
3127 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; in kvm_s390_vcpu_crypto_setup()
3128 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA); in kvm_s390_vcpu_crypto_setup()
3129 vcpu->arch.sie_block->eca &= ~ECA_APIE; in kvm_s390_vcpu_crypto_setup()
3130 vcpu->arch.sie_block->ecd &= ~ECD_ECC; in kvm_s390_vcpu_crypto_setup()
3132 if (vcpu->kvm->arch.crypto.apie) in kvm_s390_vcpu_crypto_setup()
3133 vcpu->arch.sie_block->eca |= ECA_APIE; in kvm_s390_vcpu_crypto_setup()
3136 if (vcpu->kvm->arch.crypto.aes_kw) { in kvm_s390_vcpu_crypto_setup()
3137 vcpu->arch.sie_block->ecb3 |= ECB3_AES; in kvm_s390_vcpu_crypto_setup()
3139 if (kvm_has_pckmo_ecc(vcpu->kvm)) in kvm_s390_vcpu_crypto_setup()
3140 vcpu->arch.sie_block->ecd |= ECD_ECC; in kvm_s390_vcpu_crypto_setup()
3143 if (vcpu->kvm->arch.crypto.dea_kw) in kvm_s390_vcpu_crypto_setup()
3144 vcpu->arch.sie_block->ecb3 |= ECB3_DEA; in kvm_s390_vcpu_crypto_setup()
3149 free_page(vcpu->arch.sie_block->cbrlo); in kvm_s390_vcpu_unsetup_cmma()
3150 vcpu->arch.sie_block->cbrlo = 0; in kvm_s390_vcpu_unsetup_cmma()
3155 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL); in kvm_s390_vcpu_setup_cmma()
3156 if (!vcpu->arch.sie_block->cbrlo) in kvm_s390_vcpu_setup_cmma()
3157 return -ENOMEM; in kvm_s390_vcpu_setup_cmma()
3163 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; in kvm_s390_vcpu_setup_model()
3165 vcpu->arch.sie_block->ibc = model->ibc; in kvm_s390_vcpu_setup_model()
3166 if (test_kvm_facility(vcpu->kvm, 7)) in kvm_s390_vcpu_setup_model()
3167 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list; in kvm_s390_vcpu_setup_model()
3175 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | in kvm_s390_vcpu_setup()
3179 if (test_kvm_facility(vcpu->kvm, 78)) in kvm_s390_vcpu_setup()
3181 else if (test_kvm_facility(vcpu->kvm, 8)) in kvm_s390_vcpu_setup()
3188 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT; in kvm_s390_vcpu_setup()
3189 if (test_kvm_facility(vcpu->kvm, 9)) in kvm_s390_vcpu_setup()
3190 vcpu->arch.sie_block->ecb |= ECB_SRSI; in kvm_s390_vcpu_setup()
3191 if (test_kvm_facility(vcpu->kvm, 73)) in kvm_s390_vcpu_setup()
3192 vcpu->arch.sie_block->ecb |= ECB_TE; in kvm_s390_vcpu_setup()
3194 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi) in kvm_s390_vcpu_setup()
3195 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI; in kvm_s390_vcpu_setup()
3196 if (test_kvm_facility(vcpu->kvm, 130)) in kvm_s390_vcpu_setup()
3197 vcpu->arch.sie_block->ecb2 |= ECB2_IEP; in kvm_s390_vcpu_setup()
3198 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI; in kvm_s390_vcpu_setup()
3200 vcpu->arch.sie_block->eca |= ECA_CEI; in kvm_s390_vcpu_setup()
3202 vcpu->arch.sie_block->eca |= ECA_IB; in kvm_s390_vcpu_setup()
3204 vcpu->arch.sie_block->eca |= ECA_SII; in kvm_s390_vcpu_setup()
3206 vcpu->arch.sie_block->eca |= ECA_SIGPI; in kvm_s390_vcpu_setup()
3207 if (test_kvm_facility(vcpu->kvm, 129)) { in kvm_s390_vcpu_setup()
3208 vcpu->arch.sie_block->eca |= ECA_VX; in kvm_s390_vcpu_setup()
3209 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; in kvm_s390_vcpu_setup()
3211 if (test_kvm_facility(vcpu->kvm, 139)) in kvm_s390_vcpu_setup()
3212 vcpu->arch.sie_block->ecd |= ECD_MEF; in kvm_s390_vcpu_setup()
3213 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_s390_vcpu_setup()
3214 vcpu->arch.sie_block->ecd |= ECD_ETOKENF; in kvm_s390_vcpu_setup()
3215 if (vcpu->arch.sie_block->gd) { in kvm_s390_vcpu_setup()
3216 vcpu->arch.sie_block->eca |= ECA_AIV; in kvm_s390_vcpu_setup()
3217 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u", in kvm_s390_vcpu_setup()
3218 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id); in kvm_s390_vcpu_setup()
3220 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx) in kvm_s390_vcpu_setup()
3222 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb; in kvm_s390_vcpu_setup()
3227 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; in kvm_s390_vcpu_setup()
3229 if (vcpu->kvm->arch.use_cmma) { in kvm_s390_vcpu_setup()
3234 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in kvm_s390_vcpu_setup()
3235 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; in kvm_s390_vcpu_setup()
3237 vcpu->arch.sie_block->hpid = HPID_KVM; in kvm_s390_vcpu_setup()
3241 mutex_lock(&vcpu->kvm->lock); in kvm_s390_vcpu_setup()
3242 if (kvm_s390_pv_is_protected(vcpu->kvm)) { in kvm_s390_vcpu_setup()
3247 mutex_unlock(&vcpu->kvm->lock); in kvm_s390_vcpu_setup()
3255 return -EINVAL; in kvm_arch_vcpu_precreate()
3267 return -ENOMEM; in kvm_arch_vcpu_create()
3269 vcpu->arch.sie_block = &sie_page->sie_block; in kvm_arch_vcpu_create()
3270 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; in kvm_arch_vcpu_create()
3273 vcpu->arch.sie_block->mso = 0; in kvm_arch_vcpu_create()
3274 vcpu->arch.sie_block->msl = sclp.hamax; in kvm_arch_vcpu_create()
3276 vcpu->arch.sie_block->icpua = vcpu->vcpu_id; in kvm_arch_vcpu_create()
3277 spin_lock_init(&vcpu->arch.local_int.lock); in kvm_arch_vcpu_create()
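/*
 * The GISA designation packs format flags into the low bits of the
 * origin address; kvm_s390_vcpu_setup() later reports "gd & 0x3" as
 * the format number.
 */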
3278 vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin; in kvm_arch_vcpu_create()
3279 if (vcpu->arch.sie_block->gd && sclp.has_gisaf) in kvm_arch_vcpu_create()
3280 vcpu->arch.sie_block->gd |= GISA_FORMAT1; in kvm_arch_vcpu_create()
3281 seqcount_init(&vcpu->arch.cputm_seqcount); in kvm_arch_vcpu_create()
3283 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_arch_vcpu_create()
3285 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX | in kvm_arch_vcpu_create()
3293 if (test_kvm_facility(vcpu->kvm, 64)) in kvm_arch_vcpu_create()
3294 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB; in kvm_arch_vcpu_create()
3295 if (test_kvm_facility(vcpu->kvm, 82)) in kvm_arch_vcpu_create()
3296 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC; in kvm_arch_vcpu_create()
3297 if (test_kvm_facility(vcpu->kvm, 133)) in kvm_arch_vcpu_create()
3298 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB; in kvm_arch_vcpu_create()
3299 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_arch_vcpu_create()
3300 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN; in kvm_arch_vcpu_create()
3305 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS; in kvm_arch_vcpu_create()
3307 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS; in kvm_arch_vcpu_create()
3309 if (kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_create()
3315 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", in kvm_arch_vcpu_create()
3316 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block); in kvm_arch_vcpu_create()
3317 trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block); in kvm_arch_vcpu_create()
3325 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_create()
3326 gmap_remove(vcpu->arch.gmap); in kvm_arch_vcpu_create()
3328 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_create()
3334 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); in kvm_arch_vcpu_runnable()
3340 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE); in kvm_arch_vcpu_in_kernel()
3345 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_block()
3351 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_unblock()
3356 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_request()
3362 return atomic_read(&vcpu->arch.sie_block->prog20) & in kvm_s390_vcpu_sie_inhibited()
3368 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_request_handled()
3379 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) in exit_sie()
3393 struct kvm *kvm = gmap->private; in kvm_gmap_notifier()
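/*
 * Re-arm the prefix notifier whenever the invalidated range overlaps
 * a vcpu's prefix area, which spans two pages on s390.
 */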
3406 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) { in kvm_gmap_notifier()
3407 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx", in kvm_gmap_notifier()
3419 vcpu->stat.halt_no_poll_steal++; in kvm_arch_no_poll()
3435 int r = -EINVAL; in kvm_arch_vcpu_ioctl_get_one_reg()
3437 switch (reg->id) { in kvm_arch_vcpu_ioctl_get_one_reg()
3439 r = put_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_get_one_reg()
3440 (u32 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3443 r = put_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_get_one_reg()
3444 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3448 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3451 r = put_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_get_one_reg()
3452 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3455 r = put_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_get_one_reg()
3456 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3459 r = put_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_get_one_reg()
3460 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3463 r = put_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_get_one_reg()
3464 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3467 r = put_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_get_one_reg()
3468 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3471 r = put_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_get_one_reg()
3472 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
3484 int r = -EINVAL; in kvm_arch_vcpu_ioctl_set_one_reg()
3487 switch (reg->id) { in kvm_arch_vcpu_ioctl_set_one_reg()
3489 r = get_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_set_one_reg()
3490 (u32 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
3493 r = get_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_set_one_reg()
3494 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
3497 r = get_user(val, (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
3502 r = get_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_set_one_reg()
3503 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
3506 r = get_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_set_one_reg()
3507 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
3508 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_vcpu_ioctl_set_one_reg()
3512 r = get_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_set_one_reg()
3513 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
3516 r = get_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_set_one_reg()
3517 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
3520 r = get_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_set_one_reg()
3521 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
3524 r = get_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_set_one_reg()
3525 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
3536 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI; in kvm_arch_vcpu_ioctl_normal_reset()
3537 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_arch_vcpu_ioctl_normal_reset()
3538 memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb)); in kvm_arch_vcpu_ioctl_normal_reset()
3541 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) in kvm_arch_vcpu_ioctl_normal_reset()
3555 vcpu->arch.sie_block->gpsw.mask = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3556 vcpu->arch.sie_block->gpsw.addr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3559 vcpu->arch.sie_block->ckc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3560 memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr)); in kvm_arch_vcpu_ioctl_initial_reset()
3561 vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
3562 vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
3565 memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs)); in kvm_arch_vcpu_ioctl_initial_reset()
3566 vcpu->run->s.regs.ckc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3567 vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
3568 vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
3569 vcpu->run->psw_addr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3570 vcpu->run->psw_mask = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3571 vcpu->run->s.regs.todpr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3572 vcpu->run->s.regs.cputm = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3573 vcpu->run->s.regs.ckc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3574 vcpu->run->s.regs.pp = 0; in kvm_arch_vcpu_ioctl_initial_reset()
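/* The breaking-event address apparently resets to 1, not 0. */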
3575 vcpu->run->s.regs.gbea = 1; in kvm_arch_vcpu_ioctl_initial_reset()
3576 vcpu->run->s.regs.fpc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3583 vcpu->arch.sie_block->gbea = 1; in kvm_arch_vcpu_ioctl_initial_reset()
3584 vcpu->arch.sie_block->pp = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3585 vcpu->arch.sie_block->fpf &= ~FPF_BPBC; in kvm_arch_vcpu_ioctl_initial_reset()
3586 vcpu->arch.sie_block->todpr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
3592 struct kvm_sync_regs *regs = &vcpu->run->s.regs; in kvm_arch_vcpu_ioctl_clear_reset()
3597 	memset(&regs->gprs, 0, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_clear_reset()
3598 	memset(&regs->vrs, 0, sizeof(regs->vrs)); in kvm_arch_vcpu_ioctl_clear_reset()
3599 	memset(&regs->acrs, 0, sizeof(regs->acrs)); in kvm_arch_vcpu_ioctl_clear_reset()
3600 	memset(&regs->gscb, 0, sizeof(regs->gscb)); in kvm_arch_vcpu_ioctl_clear_reset()
3602 regs->etoken = 0; in kvm_arch_vcpu_ioctl_clear_reset()
3603 regs->etoken_extension = 0; in kvm_arch_vcpu_ioctl_clear_reset()
3609 	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_set_regs()
3617 	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_get_regs()
3627 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs)); in kvm_arch_vcpu_ioctl_set_sregs()
3628 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_set_sregs()
3639 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs)); in kvm_arch_vcpu_ioctl_get_sregs()
3640 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_get_sregs()
3652 vcpu->run->s.regs.fpc = fpu->fpc; in kvm_arch_vcpu_ioctl_set_fpu()
3654 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs, in kvm_arch_vcpu_ioctl_set_fpu()
3655 (freg_t *) fpu->fprs); in kvm_arch_vcpu_ioctl_set_fpu()
3657 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs)); in kvm_arch_vcpu_ioctl_set_fpu()
3670 convert_vx_to_fp((freg_t *) fpu->fprs, in kvm_arch_vcpu_ioctl_get_fpu()
3671 (__vector128 *) vcpu->run->s.regs.vrs); in kvm_arch_vcpu_ioctl_get_fpu()
3673 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs)); in kvm_arch_vcpu_ioctl_get_fpu()
3674 fpu->fpc = vcpu->run->s.regs.fpc; in kvm_arch_vcpu_ioctl_get_fpu()
3685 rc = -EBUSY; in kvm_arch_vcpu_ioctl_set_initial_psw()
3687 vcpu->run->psw_mask = psw.mask; in kvm_arch_vcpu_ioctl_set_initial_psw()
3688 vcpu->run->psw_addr = psw.addr; in kvm_arch_vcpu_ioctl_set_initial_psw()
3696 return -EINVAL; /* not implemented yet */ in kvm_arch_vcpu_ioctl_translate()
3710 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
3713 if (dbg->control & ~VALID_GUESTDBG_FLAGS) { in kvm_arch_vcpu_ioctl_set_guest_debug()
3714 rc = -EINVAL; in kvm_arch_vcpu_ioctl_set_guest_debug()
3718 rc = -EINVAL; in kvm_arch_vcpu_ioctl_set_guest_debug()
3722 if (dbg->control & KVM_GUESTDBG_ENABLE) { in kvm_arch_vcpu_ioctl_set_guest_debug()
3723 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
3727 if (dbg->control & KVM_GUESTDBG_USE_HW_BP) in kvm_arch_vcpu_ioctl_set_guest_debug()
3731 vcpu->arch.guestdbg.last_bp = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
3735 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
3767 /* user space knows about this interface - let it control the state */ in kvm_arch_vcpu_ioctl_set_mpstate()
3768 vcpu->kvm->arch.user_cpu_state_ctrl = 1; in kvm_arch_vcpu_ioctl_set_mpstate()
3770 switch (mp_state->mp_state) { in kvm_arch_vcpu_ioctl_set_mpstate()
3779 rc = -ENXIO; in kvm_arch_vcpu_ioctl_set_mpstate()
3787 rc = -ENXIO; in kvm_arch_vcpu_ioctl_set_mpstate()
3806 * We use MMU_RELOAD just to re-arm the ipte notifier for the in kvm_s390_handle_requests()
3814 rc = gmap_mprotect_notify(vcpu->arch.gmap, in kvm_s390_handle_requests()
3825 vcpu->arch.sie_block->ihcpu = 0xffff; in kvm_s390_handle_requests()
3831 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); in kvm_s390_handle_requests()
3839 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0); in kvm_s390_handle_requests()
3846 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC; in kvm_s390_handle_requests()
3856 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA; in kvm_s390_handle_requests()
3862 * Re-enable CMM virtualization if CMMA is available and in kvm_s390_handle_requests()
3865 if ((vcpu->kvm->arch.use_cmma) && in kvm_s390_handle_requests()
3866 (vcpu->kvm->mm->context.uses_cmm)) in kvm_s390_handle_requests()
3867 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA; in kvm_s390_handle_requests()
3889 kvm->arch.epoch = gtod->tod - htod.tod; in __kvm_s390_set_tod_clock()
3890 kvm->arch.epdx = 0; in __kvm_s390_set_tod_clock()
3892 kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx; in __kvm_s390_set_tod_clock()
3893 if (kvm->arch.epoch > gtod->tod) in __kvm_s390_set_tod_clock()
3894 kvm->arch.epdx -= 1; in __kvm_s390_set_tod_clock()
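/*
 * This is a 128-bit subtraction done in two 64-bit halves: epoch is
 * the low word of (guest TOD - host TOD) and epdx the high word
 * (epoch index). If the low-word subtraction wrapped (epoch ended up
 * above gtod->tod), propagate the borrow into epdx. E.g. with
 * gtod->tod = 2 and htod.tod = 5, epoch = 0xffff...fffd > 2, so
 * epdx is decremented by one.
 */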
3899 vcpu->arch.sie_block->epoch = kvm->arch.epoch; in __kvm_s390_set_tod_clock()
3900 vcpu->arch.sie_block->epdx = kvm->arch.epdx; in __kvm_s390_set_tod_clock()
3909 if (!mutex_trylock(&kvm->lock)) in kvm_s390_try_set_tod_clock()
3912 mutex_unlock(&kvm->lock); in kvm_s390_try_set_tod_clock()
3917 * kvm_arch_fault_in_page - fault-in guest page if necessary
3922 * Make sure that a guest page has been faulted-in on the host.
3928 return gmap_fault(vcpu->arch.gmap, gpa, in kvm_arch_fault_in_page()
3945 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); in __kvm_inject_pfault_token()
3952 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token); in kvm_arch_async_page_not_present()
3953 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token); in kvm_arch_async_page_not_present()
3961 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token); in kvm_arch_async_page_present()
3962 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token); in kvm_arch_async_page_present()
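/*
 * Only hand out a pfault token if the guest opted in via DIAG 0x258:
 * the current PSW must match the programmed select/compare masks, the
 * service-signal subclass must be enabled in CR0, and pfault handling
 * must be enabled on the gmap.
 */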
3985 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_setup_async_pf()
3987 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) != in kvm_arch_setup_async_pf()
3988 vcpu->arch.pfault_compare) in kvm_arch_setup_async_pf()
3994 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) in kvm_arch_setup_async_pf()
3996 if (!vcpu->arch.gmap->pfault_enabled) in kvm_arch_setup_async_pf()
3999 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr)); in kvm_arch_setup_async_pf()
4000 hva += current->thread.gmap_addr & ~PAGE_MASK; in kvm_arch_setup_async_pf()
4001 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) in kvm_arch_setup_async_pf()
4004 return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); in kvm_arch_setup_async_pf()
4018 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14]; in vcpu_pre_run()
4019 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15]; in vcpu_pre_run()
4024 if (!kvm_is_ucontrol(vcpu->kvm)) { in vcpu_pre_run()
4039 clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.gisa_int.kicked_mask); in vcpu_pre_run()
4041 vcpu->arch.sie_block->icptcode = 0; in vcpu_pre_run()
4042 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); in vcpu_pre_run()
4068 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1); in vcpu_post_run_fault_in_sie()
4073 /* Instruction-Fetching Exceptions - we can't detect the ilen. in vcpu_post_run_fault_in_sie()
4077 pgm_info = vcpu->arch.pgm; in vcpu_post_run_fault_in_sie()
4091 vcpu->arch.sie_block->icptcode); in vcpu_post_run()
4092 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); in vcpu_post_run()
4097 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14; in vcpu_post_run()
4098 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15; in vcpu_post_run()
4100 if (exit_reason == -EINTR) { in vcpu_post_run()
4102 sie_page = container_of(vcpu->arch.sie_block, in vcpu_post_run()
4104 mcck_info = &sie_page->mcck_info; in vcpu_post_run()
4109 if (vcpu->arch.sie_block->icptcode > 0) { in vcpu_post_run()
4112 if (rc != -EOPNOTSUPP) in vcpu_post_run()
4114 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC; in vcpu_post_run()
4115 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; in vcpu_post_run()
4116 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; in vcpu_post_run()
4117 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; in vcpu_post_run()
4118 return -EREMOTE; in vcpu_post_run()
4119 } else if (exit_reason != -EFAULT) { in vcpu_post_run()
4120 vcpu->stat.exit_null++; in vcpu_post_run()
4122 } else if (kvm_is_ucontrol(vcpu->kvm)) { in vcpu_post_run()
4123 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL; in vcpu_post_run()
4124 vcpu->run->s390_ucontrol.trans_exc_code = in vcpu_post_run()
4125 current->thread.gmap_addr; in vcpu_post_run()
4126 vcpu->run->s390_ucontrol.pgm_code = 0x10; in vcpu_post_run()
4127 return -EREMOTE; in vcpu_post_run()
4128 } else if (current->thread.gmap_pfault) { in vcpu_post_run()
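/*
 * A host page fault was hit while in SIE; fault the page in
 * (writable) and let the guest retry the access.
 */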
4130 current->thread.gmap_pfault = 0; in vcpu_post_run()
4133 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1); in vcpu_post_run()
4142 struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block; in __vcpu_run()
4145 * We try to hold kvm->srcu during most of vcpu_run (except when run- in __vcpu_run()
4148 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in __vcpu_run()
4155 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in __vcpu_run()
4165 memcpy(sie_page->pv_grregs, in __vcpu_run()
4166 vcpu->run->s.regs.gprs, in __vcpu_run()
4167 sizeof(sie_page->pv_grregs)); in __vcpu_run()
4169 exit_reason = sie64a(vcpu->arch.sie_block, in __vcpu_run()
4170 vcpu->run->s.regs.gprs); in __vcpu_run()
4172 memcpy(vcpu->run->s.regs.gprs, in __vcpu_run()
4173 sie_page->pv_grregs, in __vcpu_run()
4174 sizeof(sie_page->pv_grregs)); in __vcpu_run()
4177 * that leave the guest state in an "in-between" state in __vcpu_run()
4181 if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR || in __vcpu_run()
4182 vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) { in __vcpu_run()
4183 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK; in __vcpu_run()
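/*
 * Mask interrupts in the guest PSW so nothing is injected before
 * userspace has finished handling the intercepted instruction.
 */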
4190 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in __vcpu_run()
4195 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in __vcpu_run()
4201 struct kvm_run *kvm_run = vcpu->run; in sync_regs_fmt2()
4205 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb; in sync_regs_fmt2()
4206 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb; in sync_regs_fmt2()
4207 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; in sync_regs_fmt2()
4208 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; in sync_regs_fmt2()
4209 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { in sync_regs_fmt2()
4210 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr; in sync_regs_fmt2()
4211 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp; in sync_regs_fmt2()
4212 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea; in sync_regs_fmt2()
4214 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) { in sync_regs_fmt2()
4215 vcpu->arch.pfault_token = kvm_run->s.regs.pft; in sync_regs_fmt2()
4216 vcpu->arch.pfault_select = kvm_run->s.regs.pfs; in sync_regs_fmt2()
4217 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc; in sync_regs_fmt2()
4218 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in sync_regs_fmt2()
4221 if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) { in sync_regs_fmt2()
4222 vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318; in sync_regs_fmt2()
4223 vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc; in sync_regs_fmt2()
4229 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) && in sync_regs_fmt2()
4230 test_kvm_facility(vcpu->kvm, 64) && in sync_regs_fmt2()
4231 riccb->v && in sync_regs_fmt2()
4232 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) { in sync_regs_fmt2()
4234 vcpu->arch.sie_block->ecb3 |= ECB3_RI; in sync_regs_fmt2()
4237 * If userspace sets the gscb (e.g. after migration) to non-zero, in sync_regs_fmt2()
4240 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) && in sync_regs_fmt2()
4241 test_kvm_facility(vcpu->kvm, 133) && in sync_regs_fmt2()
4242 gscb->gssm && in sync_regs_fmt2()
4243 !vcpu->arch.gs_enabled) { in sync_regs_fmt2()
4245 vcpu->arch.sie_block->ecb |= ECB_GS; in sync_regs_fmt2()
4246 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; in sync_regs_fmt2()
4247 vcpu->arch.gs_enabled = 1; in sync_regs_fmt2()
4249 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) && in sync_regs_fmt2()
4250 test_kvm_facility(vcpu->kvm, 82)) { in sync_regs_fmt2()
4251 vcpu->arch.sie_block->fpf &= ~FPF_BPBC; in sync_regs_fmt2()
4252 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0; in sync_regs_fmt2()
4257 if (current->thread.gs_cb) { in sync_regs_fmt2()
4258 vcpu->arch.host_gscb = current->thread.gs_cb; in sync_regs_fmt2()
4259 save_gs_cb(vcpu->arch.host_gscb); in sync_regs_fmt2()
4261 if (vcpu->arch.gs_enabled) { in sync_regs_fmt2()
4262 current->thread.gs_cb = (struct gs_cb *) in sync_regs_fmt2()
4263 &vcpu->run->s.regs.gscb; in sync_regs_fmt2()
4264 restore_gs_cb(current->thread.gs_cb); in sync_regs_fmt2()
4273 struct kvm_run *kvm_run = vcpu->run; in sync_regs()
4275 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) in sync_regs()
4276 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); in sync_regs()
4277 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) { in sync_regs()
4278 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); in sync_regs()
4282 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { in sync_regs()
4283 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm); in sync_regs()
4284 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc; in sync_regs()
4286 save_access_regs(vcpu->arch.host_acrs); in sync_regs()
4287 restore_access_regs(vcpu->run->s.regs.acrs); in sync_regs()
4290 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc; in sync_regs()
4291 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs; in sync_regs()
4293 current->thread.fpu.regs = vcpu->run->s.regs.vrs; in sync_regs()
4295 current->thread.fpu.regs = vcpu->run->s.regs.fprs; in sync_regs()
4296 current->thread.fpu.fpc = vcpu->run->s.regs.fpc; in sync_regs()
4297 if (test_fp_ctl(current->thread.fpu.fpc)) in sync_regs()
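/* User space provided an invalid FPC, reset it. */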
4299 current->thread.fpu.fpc = 0; in sync_regs()
4314 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC; in sync_regs()
4315 vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask & in sync_regs()
4319 kvm_run->kvm_dirty_regs = 0; in sync_regs()
4324 struct kvm_run *kvm_run = vcpu->run; in store_regs_fmt2()
4326 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr; in store_regs_fmt2()
4327 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp; in store_regs_fmt2()
4328 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; in store_regs_fmt2()
4329 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC; in store_regs_fmt2()
4330 kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val; in store_regs_fmt2()
4334 if (vcpu->arch.gs_enabled) in store_regs_fmt2()
4335 save_gs_cb(current->thread.gs_cb); in store_regs_fmt2()
4336 current->thread.gs_cb = vcpu->arch.host_gscb; in store_regs_fmt2()
4337 restore_gs_cb(vcpu->arch.host_gscb); in store_regs_fmt2()
4338 if (!vcpu->arch.host_gscb) in store_regs_fmt2()
4340 vcpu->arch.host_gscb = NULL; in store_regs_fmt2()
4348 struct kvm_run *kvm_run = vcpu->run; in store_regs()
4350 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; in store_regs()
4351 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; in store_regs()
4352 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu); in store_regs()
4353 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); in store_regs()
4354 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu); in store_regs()
4355 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc; in store_regs()
4356 kvm_run->s.regs.pft = vcpu->arch.pfault_token; in store_regs()
4357 kvm_run->s.regs.pfs = vcpu->arch.pfault_select; in store_regs()
4358 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; in store_regs()
4359 save_access_regs(vcpu->run->s.regs.acrs); in store_regs()
4360 restore_access_regs(vcpu->arch.host_acrs); in store_regs()
4363 vcpu->run->s.regs.fpc = current->thread.fpu.fpc; in store_regs()
4365 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc; in store_regs()
4366 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs; in store_regs()
4373 struct kvm_run *kvm_run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
4376 if (kvm_run->immediate_exit) in kvm_arch_vcpu_ioctl_run()
4377 return -EINTR; in kvm_arch_vcpu_ioctl_run()
4379 if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS || in kvm_arch_vcpu_ioctl_run()
4380 kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS) in kvm_arch_vcpu_ioctl_run()
4381 return -EINVAL; in kvm_arch_vcpu_ioctl_run()
4397 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { in kvm_arch_vcpu_ioctl_run()
4401 vcpu->vcpu_id); in kvm_arch_vcpu_ioctl_run()
4402 rc = -EINVAL; in kvm_arch_vcpu_ioctl_run()
4413 kvm_run->exit_reason = KVM_EXIT_INTR; in kvm_arch_vcpu_ioctl_run()
4414 rc = -EINTR; in kvm_arch_vcpu_ioctl_run()
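/* -EREMOTE means userspace support is needed; kvm_run is prepared. */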
4422 if (rc == -EREMOTE) { in kvm_arch_vcpu_ioctl_run()
4432 vcpu->stat.exit_userspace++; in kvm_arch_vcpu_ioctl_run()
4441 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
4442 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
4455 return -EFAULT; in kvm_s390_store_status_unloaded()
4459 return -EFAULT; in kvm_s390_store_status_unloaded()
4462 gpa -= __LC_FPREGS_SAVE_AREA; in kvm_s390_store_status_unloaded()
4466 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs); in kvm_s390_store_status_unloaded()
4471 vcpu->run->s.regs.fprs, 128); in kvm_s390_store_status_unloaded()
4474 vcpu->run->s.regs.gprs, 128); in kvm_s390_store_status_unloaded()
4476 &vcpu->arch.sie_block->gpsw, 16); in kvm_s390_store_status_unloaded()
4480 &vcpu->run->s.regs.fpc, 4); in kvm_s390_store_status_unloaded()
4482 &vcpu->arch.sie_block->todpr, 4); in kvm_s390_store_status_unloaded()
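/*
 * The store-status format keeps only bits 0-55 of the clock
 * comparator, hence the shift by 8 before storing.
 */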
4486 clkcomp = vcpu->arch.sie_block->ckc >> 8; in kvm_s390_store_status_unloaded()
4490 &vcpu->run->s.regs.acrs, 64); in kvm_s390_store_status_unloaded()
4492 &vcpu->arch.sie_block->gcr, 128); in kvm_s390_store_status_unloaded()
4493 return rc ? -EFAULT : 0; in kvm_s390_store_status_unloaded()
4504 vcpu->run->s.regs.fpc = current->thread.fpu.fpc; in kvm_s390_vcpu_store_status()
4505 save_access_regs(vcpu->run->s.regs.acrs); in kvm_s390_vcpu_store_status()
4541 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); in kvm_s390_vcpu_start()
4543 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
4544 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_start()
4550 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
4556 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) in kvm_s390_vcpu_start()
4561 /* we're the only active VCPU -> speed it up */ in kvm_s390_vcpu_start()
4569 __disable_ibs_on_all_vcpus(vcpu->kvm); in kvm_s390_vcpu_start()
4579 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK; in kvm_s390_vcpu_start()
4585 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
4597 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); in kvm_s390_vcpu_stop()
4599 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
4600 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_stop()
4606 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
4623 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) { in kvm_s390_vcpu_stop()
4625 started_vcpu = vcpu->kvm->vcpus[i]; in kvm_s390_vcpu_stop()
4637 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
4646 if (cap->flags) in kvm_vcpu_ioctl_enable_cap()
4647 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
4649 switch (cap->cap) { in kvm_vcpu_ioctl_enable_cap()
4651 if (!vcpu->kvm->arch.css_support) { in kvm_vcpu_ioctl_enable_cap()
4652 vcpu->kvm->arch.css_support = 1; in kvm_vcpu_ioctl_enable_cap()
4653 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support"); in kvm_vcpu_ioctl_enable_cap()
4654 trace_kvm_s390_enable_css(vcpu->kvm); in kvm_vcpu_ioctl_enable_cap()
4659 r = -EINVAL; in kvm_vcpu_ioctl_enable_cap()
4668 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_guest_sida_op()
4671 if (mop->flags || !mop->size) in kvm_s390_guest_sida_op()
4672 return -EINVAL; in kvm_s390_guest_sida_op()
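/* "a + b < a" catches unsigned wraparound of size + sida_offset. */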
4673 if (mop->size + mop->sida_offset < mop->size) in kvm_s390_guest_sida_op()
4674 return -EINVAL; in kvm_s390_guest_sida_op()
4675 if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block)) in kvm_s390_guest_sida_op()
4676 return -E2BIG; in kvm_s390_guest_sida_op()
4678 return -EINVAL; in kvm_s390_guest_sida_op()
4680 switch (mop->op) { in kvm_s390_guest_sida_op()
4682 if (copy_to_user(uaddr, (void *)(sida_origin(vcpu->arch.sie_block) + in kvm_s390_guest_sida_op()
4683 mop->sida_offset), mop->size)) in kvm_s390_guest_sida_op()
4684 r = -EFAULT; in kvm_s390_guest_sida_op()
4688 if (copy_from_user((void *)(sida_origin(vcpu->arch.sie_block) + in kvm_s390_guest_sida_op()
4689 mop->sida_offset), uaddr, mop->size)) in kvm_s390_guest_sida_op()
4690 r = -EFAULT; in kvm_s390_guest_sida_op()
4698 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_guest_mem_op()
4704 if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size) in kvm_s390_guest_mem_op()
4705 return -EINVAL; in kvm_s390_guest_mem_op()
4707 if (mop->size > MEM_OP_MAX_SIZE) in kvm_s390_guest_mem_op()
4708 return -E2BIG; in kvm_s390_guest_mem_op()
4711 return -EINVAL; in kvm_s390_guest_mem_op()
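/*
 * Unless this is a check-only operation, bounce the data through a
 * kernel buffer; guest memory is never accessed in place here.
 */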
4713 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) { in kvm_s390_guest_mem_op()
4714 tmpbuf = vmalloc(mop->size); in kvm_s390_guest_mem_op()
4716 return -ENOMEM; in kvm_s390_guest_mem_op()
4719 switch (mop->op) { in kvm_s390_guest_mem_op()
4721 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { in kvm_s390_guest_mem_op()
4722 r = check_gva_range(vcpu, mop->gaddr, mop->ar, in kvm_s390_guest_mem_op()
4723 mop->size, GACC_FETCH); in kvm_s390_guest_mem_op()
4726 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); in kvm_s390_guest_mem_op()
4728 if (copy_to_user(uaddr, tmpbuf, mop->size)) in kvm_s390_guest_mem_op()
4729 r = -EFAULT; in kvm_s390_guest_mem_op()
4733 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { in kvm_s390_guest_mem_op()
4734 r = check_gva_range(vcpu, mop->gaddr, mop->ar, in kvm_s390_guest_mem_op()
4735 mop->size, GACC_STORE); in kvm_s390_guest_mem_op()
4738 if (copy_from_user(tmpbuf, uaddr, mop->size)) { in kvm_s390_guest_mem_op()
4739 r = -EFAULT; in kvm_s390_guest_mem_op()
4742 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); in kvm_s390_guest_mem_op()
4746 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0) in kvm_s390_guest_mem_op()
4747 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); in kvm_s390_guest_mem_op()
4758 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_s390_guest_memsida_op()
4760 switch (mop->op) { in kvm_s390_guest_memsida_op()
4767 /* we are locked against sida going away by the vcpu->mutex */ in kvm_s390_guest_memsida_op()
4771 r = -EINVAL; in kvm_s390_guest_memsida_op()
4774 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvm_s390_guest_memsida_op()
4781 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl()
4789 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
4797 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
4799 return -EINVAL; in kvm_arch_vcpu_async_ioctl()
4803 return -ENOIOCTLCMD; in kvm_arch_vcpu_async_ioctl()
4809 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
4819 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
4821 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
4826 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4866 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4869 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4883 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4887 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
4888 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4892 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, in kvm_arch_vcpu_ioctl()
4900 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4904 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
4905 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4909 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, in kvm_arch_vcpu_ioctl()
4915 r = gmap_fault(vcpu->arch.gmap, arg, 0); in kvm_arch_vcpu_ioctl()
4921 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4933 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4939 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4945 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4957 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4961 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4971 r = -ENOTTY; in kvm_arch_vcpu_ioctl()
4981 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET) in kvm_arch_vcpu_fault()
4982 && (kvm_is_ucontrol(vcpu->kvm))) { in kvm_arch_vcpu_fault()
4983 vmf->page = virt_to_page(vcpu->arch.sie_block); in kvm_arch_vcpu_fault()
4984 get_page(vmf->page); in kvm_arch_vcpu_fault()
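/*
 * Memslots on s390 must be aligned to 1 MB segment boundaries in both
 * user address and size, since the gmap works on segment granularity.
 */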
5002 if (mem->userspace_addr & 0xffffful) in kvm_arch_prepare_memory_region()
5003 return -EINVAL; in kvm_arch_prepare_memory_region()
5005 if (mem->memory_size & 0xffffful) in kvm_arch_prepare_memory_region()
5006 return -EINVAL; in kvm_arch_prepare_memory_region()
5008 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit) in kvm_arch_prepare_memory_region()
5009 return -EINVAL; in kvm_arch_prepare_memory_region()
5013 return -EINVAL; in kvm_arch_prepare_memory_region()
5015 if (!kvm->arch.migration_mode) in kvm_arch_prepare_memory_region()
5020 * - userspace creates a new memslot with dirty logging off, in kvm_arch_prepare_memory_region()
5021 * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and in kvm_arch_prepare_memory_region()
5027 !(mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) in kvm_arch_prepare_memory_region()
5044 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
5045 old->npages * PAGE_SIZE); in kvm_arch_commit_memory_region()
5048 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
5049 old->npages * PAGE_SIZE); in kvm_arch_commit_memory_region()
5054 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, in kvm_arch_commit_memory_region()
5055 mem->guest_phys_addr, mem->memory_size); in kvm_arch_commit_memory_region()
5076 vcpu->valid_wakeup = false; in kvm_arch_vcpu_block_finish()
5085 return -ENODEV; in kvm_s390_init()
5090 return -EINVAL; in kvm_s390_init()