Lines Matching +full:reserved +full:- +full:cpu +full:- +full:vectors

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
84 if (cap->flags) in kvm_vm_ioctl_enable_cap()
85 return -EINVAL; in kvm_vm_ioctl_enable_cap()
87 switch (cap->cap) { in kvm_vm_ioctl_enable_cap()
90 kvm->arch.return_nisv_io_abort_to_user = true; in kvm_vm_ioctl_enable_cap()
93 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
109 * Although this is a per-CPU feature, we make it global because in set_default_csv2()
116 kvm->arch.pfr0_csv2 = 1; in set_default_csv2()
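The fragment above sets a VM-wide default for CSV2 (the Spectre-v2 resilience field of ID_AA64PFR0_EL1) even though the underlying feature is reported per CPU. A minimal userspace sketch of the idea, with all names and values invented for illustration: advertise only what every host CPU can honor, so a vCPU that migrates between physical CPUs never over-promises.

    #include <stdio.h>

    /* Hypothetical per-CPU CSV2 readings; 1 = mitigated, 0 = unknown. */
    static int weakest(const int *per_cpu_csv2, int ncpus)
    {
        int v = per_cpu_csv2[0];
        for (int i = 1; i < ncpus; i++)
            if (per_cpu_csv2[i] < v)
                v = per_cpu_csv2[i];
        return v;
    }

    int main(void)
    {
        int csv2[4] = { 1, 1, 0, 1 };
        printf("VM-wide CSV2 default: %d\n", weakest(csv2, 4));
        return 0;
    }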
120 * kvm_arch_init_vm - initializes a VM data structure
131 ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu); in kvm_arch_init_vm()
142 kvm->arch.max_vcpus = kvm_arm_default_max_vcpus(); in kvm_arch_init_vm()
148 kvm_free_stage2_pgd(&kvm->arch.mmu); in kvm_arch_init_vm()
159 * kvm_arch_destroy_vm - destroy the VM data structure
166 bitmap_free(kvm->arch.pmu_filter); in kvm_arch_destroy_vm()
171 if (kvm->vcpus[i]) { in kvm_arch_destroy_vm()
172 kvm_vcpu_destroy(kvm->vcpus[i]); in kvm_arch_destroy_vm()
173 kvm->vcpus[i] = NULL; in kvm_arch_destroy_vm()
176 atomic_set(&kvm->online_vcpus, 0); in kvm_arch_destroy_vm()
212 r = kvm->arch.max_vcpus; in kvm_vm_ioctl_check_extension()
218 r = -EINVAL; in kvm_vm_ioctl_check_extension()
220 r = kvm->arch.vgic.msis_require_devid; in kvm_vm_ioctl_check_extension()
242 return -EINVAL; in kvm_arch_dev_ioctl()
264 return -EBUSY; in kvm_arch_vcpu_precreate()
266 if (id >= kvm->arch.max_vcpus) in kvm_arch_vcpu_precreate()
267 return -EINVAL; in kvm_arch_vcpu_precreate()
277 vcpu->arch.target = -1; in kvm_arch_vcpu_create()
278 bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); in kvm_arch_vcpu_create()
280 vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO; in kvm_arch_vcpu_create()
289 kvm_arm_pvtime_vcpu_init(&vcpu->arch); in kvm_arch_vcpu_create()
291 vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu; in kvm_arch_vcpu_create()
306 if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm))) in kvm_arch_vcpu_destroy()
309 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); in kvm_arch_vcpu_destroy()
325 * WFI), we need to sync back the state of the GIC CPU interface in kvm_arch_vcpu_blocking()
327 * that kvm_arch_vcpu_runnable has up-to-date data to decide in kvm_arch_vcpu_blocking()
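The comment explains why blocking on WFI must first sync the GIC CPU interface back to memory: the wake-up predicate (kvm_arch_vcpu_runnable) reads software state, so any interrupt state still held in hardware must be written back before the vCPU sleeps, or the check could miss a pending interrupt. A compilable sketch of that shape; every type and helper here is invented:

    #include <stdbool.h>
    #include <stdio.h>

    struct vcpu_state { bool hw_irq_pending; bool sw_irq_pending; };

    /* Stand-in for writing GIC CPU interface state back to memory. */
    static void gic_sync(struct vcpu_state *v)
    {
        v->sw_irq_pending = v->hw_irq_pending;
    }

    static bool runnable(const struct vcpu_state *v)
    {
        return v->sw_irq_pending;   /* only sees synced state */
    }

    static void vcpu_blocking(struct vcpu_state *v)
    {
        gic_sync(v);                /* must precede the runnable check */
        if (!runnable(v)) {
            /* sleep until kicked, in the real code */
        }
    }

    int main(void)
    {
        struct vcpu_state v = { .hw_irq_pending = true };
        vcpu_blocking(&v);
        printf("runnable after sync: %d\n", runnable(&v));
        return 0;
    }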
346 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_vcpu_load() argument
351 mmu = vcpu->arch.hw_mmu; in kvm_arch_vcpu_load()
352 last_ran = this_cpu_ptr(mmu->last_vcpu_ran); in kvm_arch_vcpu_load()
355 * We guarantee that both TLBs and I-cache are private to each in kvm_arch_vcpu_load()
357 * previously run on the same physical CPU, call into the in kvm_arch_vcpu_load()
361 * over-invalidation doesn't affect correctness. in kvm_arch_vcpu_load()
363 if (*last_ran != vcpu->vcpu_id) { in kvm_arch_vcpu_load()
365 *last_ran = vcpu->vcpu_id; in kvm_arch_vcpu_load()
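The comment above documents the last_vcpu_ran bookkeeping: TLB entries and the I-cache are only guaranteed private per physical CPU, so when a different vCPU of the same VM last ran on this CPU, the local TLB context is invalidated; over-invalidation is harmless, a missed invalidation would not be. A self-contained model of the bookkeeping (the flush itself is just a stub):

    #include <stdio.h>

    #define NR_CPUS 2

    static int last_vcpu_ran[NR_CPUS];  /* per-physical-CPU, -1 = none */

    static void local_flush(int cpu)
    {
        printf("cpu%d: flush local TLB context\n", cpu);
    }

    static void vcpu_load(int vcpu_id, int cpu)
    {
        if (last_vcpu_ran[cpu] != vcpu_id) {
            local_flush(cpu);
            last_vcpu_ran[cpu] = vcpu_id;
        }
    }

    int main(void)
    {
        last_vcpu_ran[0] = last_vcpu_ran[1] = -1;
        vcpu_load(0, 0);   /* flushes: first use of cpu0 */
        vcpu_load(0, 0);   /* no flush: same vCPU again */
        vcpu_load(1, 0);   /* flushes: different vCPU on cpu0 */
        return 0;
    }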
368 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
376 if (kvm_arm_is_pvtime_enabled(&vcpu->arch)) in kvm_arch_vcpu_load()
397 vcpu->cpu = -1; in kvm_arch_vcpu_put()
402 vcpu->arch.power_off = true; in vcpu_power_off()
410 if (vcpu->arch.power_off) in kvm_arch_vcpu_ioctl_get_mpstate()
411 mp_state->mp_state = KVM_MP_STATE_STOPPED; in kvm_arch_vcpu_ioctl_get_mpstate()
413 mp_state->mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_ioctl_get_mpstate()
423 switch (mp_state->mp_state) { in kvm_arch_vcpu_ioctl_set_mpstate()
425 vcpu->arch.power_off = false; in kvm_arch_vcpu_ioctl_set_mpstate()
431 ret = -EINVAL; in kvm_arch_vcpu_ioctl_set_mpstate()
438 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
441 * If the guest CPU is not waiting for interrupts or an interrupt line is
442 * asserted, the CPU is by definition runnable.
448 && !v->arch.power_off && !v->arch.pause); in kvm_arch_vcpu_runnable()
456 /* Just ensure a guest exit from a particular CPU */
469 * need_new_vmid_gen - check that the VMID is still valid
474 * The hardware supports a limited set of values with the value zero reserved
483 smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */ in need_new_vmid_gen()
484 return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen); in need_new_vmid_gen()
488 * update_vmid - Update the vmid with a valid VMID for the current generation
489 * @vmid: The stage-2 VMID information struct
499 * We need to re-check the vmid_gen here to ensure that if another vcpu in update_vmid()
514 * On SMP we know no other CPUs can use this CPU's or each in update_vmid()
527 vmid->vmid = kvm_next_vmid; in update_vmid()
529 kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1; in update_vmid()
532 WRITE_ONCE(vmid->vmid_gen, atomic64_read(&kvm_vmid_gen)); in update_vmid()
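The need_new_vmid_gen()/update_vmid() fragments above implement generation-counted VMID allocation: a global generation counter is bumped whenever the limited hardware VMID space (value zero reserved) rolls over, and a VM's VMID is stale whenever its recorded generation differs. The smp_rmb() orders the read of the global generation against the read of the per-VM copy. A userspace model using C11 atomics; the fence stands in for smp_rmb(), the lock-protected re-check and TLB flush are elided, and VMID_BITS is made up:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define VMID_BITS 8                  /* illustrative; hardware dictates this */

    static atomic_ullong vmid_gen = 1;   /* global generation counter */
    static unsigned next_vmid = 1;       /* 0 is reserved */

    struct vmid { unsigned long long gen; unsigned vmid; };

    static bool need_new_gen(const struct vmid *v)
    {
        unsigned long long cur = atomic_load(&vmid_gen);
        atomic_thread_fence(memory_order_acquire);   /* ~ smp_rmb() */
        return v->gen != cur;
    }

    static void update_vmid(struct vmid *v)
    {
        if (!need_new_gen(v))
            return;                      /* re-checked under a lock in KVM */
        if (next_vmid == 0) {            /* first user of a new generation */
            atomic_fetch_add(&vmid_gen, 1);
            next_vmid = 1;               /* KVM also flushes TLBs here */
        }
        v->vmid = next_vmid++;
        next_vmid &= (1u << VMID_BITS) - 1;
        v->gen = atomic_load(&vmid_gen); /* published with a barrier in KVM */
    }

    int main(void)
    {
        struct vmid v = { 0, 0 };
        update_vmid(&v);
        printf("vmid=%u gen=%llu\n", v.vmid, v.gen);
        return 0;
    }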
539 struct kvm *kvm = vcpu->kvm; in kvm_vcpu_first_run_init()
542 if (likely(vcpu->arch.has_run_once)) in kvm_vcpu_first_run_init()
546 return -EPERM; in kvm_vcpu_first_run_init()
548 vcpu->arch.has_run_once = true; in kvm_vcpu_first_run_init()
590 vcpu->arch.pause = true; in kvm_arm_halt_guest()
600 vcpu->arch.pause = false; in kvm_arm_resume_guest()
610 (!vcpu->arch.power_off) && (!vcpu->arch.pause), in vcpu_req_sleep()
613 if (vcpu->arch.power_off || vcpu->arch.pause) { in vcpu_req_sleep()
628 return vcpu->arch.target >= 0; in kvm_vcpu_initialized()
660 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
671 struct kvm_run *run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
675 return -ENOEXEC; in kvm_arch_vcpu_ioctl_run()
681 if (run->exit_reason == KVM_EXIT_MMIO) { in kvm_arch_vcpu_ioctl_run()
687 if (run->immediate_exit) in kvm_arch_vcpu_ioctl_run()
688 return -EINTR; in kvm_arch_vcpu_ioctl_run()
695 run->exit_reason = KVM_EXIT_UNKNOWN; in kvm_arch_vcpu_ioctl_run()
702 update_vmid(&vcpu->arch.hw_mmu->vmid); in kvm_arch_vcpu_ioctl_run()
709 * non-preemptible context. in kvm_arch_vcpu_ioctl_run()
724 ret = -EINTR; in kvm_arch_vcpu_ioctl_run()
725 run->exit_reason = KVM_EXIT_INTR; in kvm_arch_vcpu_ioctl_run()
738 ret = -EINTR; in kvm_arch_vcpu_ioctl_run()
739 run->exit_reason = KVM_EXIT_INTR; in kvm_arch_vcpu_ioctl_run()
747 * Documentation/virt/kvm/vcpu-requests.rst in kvm_arch_vcpu_ioctl_run()
749 smp_store_mb(vcpu->mode, IN_GUEST_MODE); in kvm_arch_vcpu_ioctl_run()
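The smp_store_mb() above is one half of the handshake described in Documentation/virt/kvm/vcpu-requests.rst: the vCPU announces IN_GUEST_MODE with a full barrier and then re-checks for pending work (requests, signals, a stale VMID, per the following lines), while a requester posts its request and only then looks at the mode to decide whether a kick is needed. Either ordering of the race is then safe. A two-thread model with C11 atomics; all names are illustrative:

    #include <stdatomic.h>
    #include <stdbool.h>

    enum mode { OUTSIDE_GUEST_MODE, IN_GUEST_MODE };

    static _Atomic enum mode vcpu_mode;
    static atomic_bool request_pending;

    /* vCPU thread: announce entry (seq_cst ~ smp_store_mb), then re-check. */
    static bool try_enter_guest(void)
    {
        atomic_store(&vcpu_mode, IN_GUEST_MODE);
        if (atomic_load(&request_pending)) {
            atomic_store(&vcpu_mode, OUTSIDE_GUEST_MODE);
            return false;                 /* bail out and handle the request */
        }
        return true;                      /* safe to run the guest */
    }

    /* Requester thread: post the request, then kick only if needed. */
    static void make_request(void)
    {
        atomic_store(&request_pending, true);
        if (atomic_load(&vcpu_mode) == IN_GUEST_MODE) {
            /* send an IPI in real KVM to force a guest exit */
        }
    }

    int main(void)
    {
        make_request();
        (void)try_enter_guest();          /* returns false: request pending */
        return 0;
    }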
751 if (ret <= 0 || need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) || in kvm_arch_vcpu_ioctl_run()
753 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
774 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
775 vcpu->stat.exits++; in kvm_arch_vcpu_ioctl_run()
837 * if implemented by the CPU. If we spot the guest in such in kvm_arch_vcpu_ioctl_run()
844 * As we have caught the guest red-handed, decide that in kvm_arch_vcpu_ioctl_run()
849 vcpu->arch.target = -1; in kvm_arch_vcpu_ioctl_run()
856 /* Tell userspace about in-kernel device output levels */ in kvm_arch_vcpu_ioctl_run()
857 if (unlikely(!irqchip_in_kernel(vcpu->kvm))) { in kvm_arch_vcpu_ioctl_run()
893 * trigger a world-switch round on the running physical CPU to set the in vcpu_interrupt_line()
905 u32 irq = irq_level->irq; in kvm_vm_ioctl_irq_line()
907 int nrcpus = atomic_read(&kvm->online_vcpus); in kvm_vm_ioctl_irq_line()
909 bool level = irq_level->level; in kvm_vm_ioctl_irq_line()
916 trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level); in kvm_vm_ioctl_irq_line()
921 return -ENXIO; in kvm_vm_ioctl_irq_line()
924 return -EINVAL; in kvm_vm_ioctl_irq_line()
928 return -EINVAL; in kvm_vm_ioctl_irq_line()
931 return -EINVAL; in kvm_vm_ioctl_irq_line()
936 return -ENXIO; in kvm_vm_ioctl_irq_line()
939 return -EINVAL; in kvm_vm_ioctl_irq_line()
943 return -EINVAL; in kvm_vm_ioctl_irq_line()
946 return -EINVAL; in kvm_vm_ioctl_irq_line()
948 return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL); in kvm_vm_ioctl_irq_line()
951 return -ENXIO; in kvm_vm_ioctl_irq_line()
954 return -EINVAL; in kvm_vm_ioctl_irq_line()
959 return -EINVAL; in kvm_vm_ioctl_irq_line()
968 if (init->target != phys_target) in kvm_vcpu_set_target()
969 return -EINVAL; in kvm_vcpu_set_target()
975 if (vcpu->arch.target != -1 && vcpu->arch.target != init->target) in kvm_vcpu_set_target()
976 return -EINVAL; in kvm_vcpu_set_target()
978 /* -ENOENT for unknown features, -EINVAL for invalid combinations. */ in kvm_vcpu_set_target()
979 for (i = 0; i < sizeof(init->features) * 8; i++) { in kvm_vcpu_set_target()
980 bool set = (init->features[i / 32] & (1 << (i % 32))); in kvm_vcpu_set_target()
983 return -ENOENT; in kvm_vcpu_set_target()
989 if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES && in kvm_vcpu_set_target()
990 test_bit(i, vcpu->arch.features) != set) in kvm_vcpu_set_target()
991 return -EINVAL; in kvm_vcpu_set_target()
994 set_bit(i, vcpu->arch.features); in kvm_vcpu_set_target()
997 vcpu->arch.target = phys_target; in kvm_vcpu_set_target()
1002 vcpu->arch.target = -1; in kvm_vcpu_set_target()
1003 bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); in kvm_vcpu_set_target()
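The kvm_vcpu_set_target() fragment above walks a feature bitmap stored as an array of 32-bit words: bit i lives in word i / 32 at position i % 32, unknown bits yield -ENOENT, and flipping a bit after the target has been set yields -EINVAL. A quick standalone demonstration of the indexing arithmetic; the feature numbers are made up:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool feature_set(const uint32_t *features, unsigned i)
    {
        return features[i / 32] & (1u << (i % 32));
    }

    int main(void)
    {
        uint32_t features[2] = { 0 };
        features[33 / 32] |= 1u << (33 % 32);   /* set feature 33 */
        printf("feature 33: %d, feature 2: %d\n",
               feature_set(features, 33), feature_set(features, 2));
        return 0;
    }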
1024 * need to invalidate the I-cache though, as FWB does *not* in kvm_arch_vcpu_ioctl_vcpu_init()
1027 if (vcpu->arch.has_run_once) { in kvm_arch_vcpu_ioctl_vcpu_init()
1029 stage2_unmap_vm(vcpu->kvm); in kvm_arch_vcpu_ioctl_vcpu_init()
1037 * Handle the "start in power-off" case. in kvm_arch_vcpu_ioctl_vcpu_init()
1039 if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) in kvm_arch_vcpu_ioctl_vcpu_init()
1042 vcpu->arch.power_off = false; in kvm_arch_vcpu_ioctl_vcpu_init()
1050 int ret = -ENXIO; in kvm_arm_vcpu_set_attr()
1052 switch (attr->group) { in kvm_arm_vcpu_set_attr()
1064 int ret = -ENXIO; in kvm_arm_vcpu_get_attr()
1066 switch (attr->group) { in kvm_arm_vcpu_get_attr()
1078 int ret = -ENXIO; in kvm_arm_vcpu_has_attr()
1080 switch (attr->group) { in kvm_arm_vcpu_has_attr()
1102 /* check whether the reserved field is zero */ in kvm_arm_vcpu_set_events()
1103 for (i = 0; i < ARRAY_SIZE(events->reserved); i++) in kvm_arm_vcpu_set_events()
1104 if (events->reserved[i]) in kvm_arm_vcpu_set_events()
1105 return -EINVAL; in kvm_arm_vcpu_set_events()
1108 for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++) in kvm_arm_vcpu_set_events()
1109 if (events->exception.pad[i]) in kvm_arm_vcpu_set_events()
1110 return -EINVAL; in kvm_arm_vcpu_set_events()
1118 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
1127 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1138 r = -ENOEXEC; in kvm_arch_vcpu_ioctl()
1142 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1165 r = -ENOEXEC; in kvm_arch_vcpu_ioctl()
1169 r = -EPERM; in kvm_arch_vcpu_ioctl()
1173 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1180 r = -E2BIG; in kvm_arch_vcpu_ioctl()
1183 r = kvm_arm_copy_reg_indices(vcpu, user_list->reg); in kvm_arch_vcpu_ioctl()
1187 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1194 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1201 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1211 return -EINVAL; in kvm_arch_vcpu_ioctl()
1214 return -EFAULT; in kvm_arch_vcpu_ioctl()
1222 return -EFAULT; in kvm_arch_vcpu_ioctl()
1230 return -ENOEXEC; in kvm_arch_vcpu_ioctl()
1233 return -EFAULT; in kvm_arch_vcpu_ioctl()
1238 r = -EINVAL; in kvm_arch_vcpu_ioctl()
1260 dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >> in kvm_vm_ioctl_set_device_addr()
1262 type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >> in kvm_vm_ioctl_set_device_addr()
1268 return -ENXIO; in kvm_vm_ioctl_set_device_addr()
1269 return kvm_vgic_addr(kvm, type, &dev_addr->addr, true); in kvm_vm_ioctl_set_device_addr()
1271 return -ENODEV; in kvm_vm_ioctl_set_device_addr()
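kvm_vm_ioctl_set_device_addr() above recovers a device id and a device type from a single 64-bit id field by mask-and-shift. A sketch of the decoding with an invented field layout (the real KVM_ARM_DEVICE_* masks live in the uapi headers):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative layout: type in bits [15:0], id in bits [31:16]. */
    #define DEVICE_TYPE_MASK  0x000000000000ffffULL
    #define DEVICE_TYPE_SHIFT 0
    #define DEVICE_ID_MASK    0x00000000ffff0000ULL
    #define DEVICE_ID_SHIFT   16

    int main(void)
    {
        uint64_t id = (2ULL << DEVICE_ID_SHIFT) | 3ULL;
        printf("dev_id=%llu type=%llu\n",
               (unsigned long long)((id & DEVICE_ID_MASK) >> DEVICE_ID_SHIFT),
               (unsigned long long)((id & DEVICE_TYPE_MASK) >> DEVICE_TYPE_SHIFT));
        return 0;
    }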
1278 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
1285 return -ENXIO; in kvm_arch_vm_ioctl()
1286 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
1288 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
1295 return -EFAULT; in kvm_arch_vm_ioctl()
1307 return -EFAULT; in kvm_arch_vm_ioctl()
1312 return -EINVAL; in kvm_arch_vm_ioctl()
1318 return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) - in nvhe_percpu_size()
1335 * !SV2 + !HEL2 -> use direct vectors in kvm_map_vectors()
1336 * SV2 + !HEL2 -> use hardened vectors in place in kvm_map_vectors()
1337 * !SV2 + HEL2 -> allocate one vector slot and use exec mapping in kvm_map_vectors()
1338 * SV2 + HEL2 -> use hardened vectors and use exec mapping in kvm_map_vectors()
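The four-line table above enumerates the vector choice in kvm_map_vectors() from two predicates: SV2 (Spectre-v2 hardened vectors are needed) and HEL2 (the EL2 I-cache needs invalidating, hence an executable slot mapping). A direct, purely illustrative transcription of that truth table into code:

    #include <stdbool.h>
    #include <stdio.h>

    enum vectors { DIRECT, HARDENED_IN_PLACE, SLOT_EXEC, HARDENED_SLOT_EXEC };

    static enum vectors pick_vectors(bool sv2, bool hel2)
    {
        if (!sv2 && !hel2) return DIRECT;
        if ( sv2 && !hel2) return HARDENED_IN_PLACE;
        if (!sv2 &&  hel2) return SLOT_EXEC;
        return HARDENED_SLOT_EXEC;
    }

    int main(void)
    {
        for (int sv2 = 0; sv2 <= 1; sv2++)
            for (int hel2 = 0; hel2 <= 1; hel2++)
                printf("SV2=%d HEL2=%d -> %d\n",
                       sv2, hel2, pick_vectors(sv2, hel2));
        return 0;
    }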
1376 * Calculate the raw per-cpu offset without a translation from the in cpu_init_hyp_mode()
1378 * so that we can use adr_l to access per-cpu variables in EL2. in cpu_init_hyp_mode()
1380 tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) - in cpu_init_hyp_mode()
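The tpidr_el2 computation above stores the delta between this CPU's per-cpu copy and the linked address of the per-cpu section start, so EL2 code can reach a per-cpu variable as linked address plus TPIDR_EL2. A pointer-arithmetic model of the same calculation; the "per-cpu copy" here is ordinary heap memory, not a real per-cpu section:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char percpu_template[64];   /* stands in for __per_cpu_start */

    int main(void)
    {
        /* Each "CPU" owns a private copy of the template region. */
        char *cpu0 = malloc(sizeof(percpu_template));
        memcpy(cpu0, percpu_template, sizeof(percpu_template));

        /* tpidr_el2 = this CPU's copy minus the linked address. */
        uintptr_t tpidr_el2 = (uintptr_t)cpu0 - (uintptr_t)percpu_template;

        /* A per-cpu variable at offset 8 resolves as link addr + delta. */
        char *resolved = (char *)((uintptr_t)(percpu_template + 8) + tpidr_el2);
        printf("resolved correctly: %d\n", resolved == cpu0 + 8);
        free(cpu0);
        return 0;
    }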
1400 * Disabling SSBD on a non-VHE system requires us to enable SSBS in cpu_init_hyp_mode()
1417 kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt); in cpu_hyp_reinit()
1468 * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should in hyp_init_cpu_pm_notifier()
1469 * re-enable hyp. in hyp_init_cpu_pm_notifier()
1476 * so that the hardware will be re-enabled in hyp_init_cpu_pm_notifier()
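The PM notifier comment above describes the suspend/resume pairing: EL2 state can be lost across a low-power transition, so PM_ENTER records whether hyp was in use and PM_EXIT re-enables it only in that case. A trivial sketch of that enter/exit pairing, with invented names:

    #include <stdbool.h>
    #include <stdio.h>

    static bool hyp_was_enabled;

    static void pm_enter(bool hyp_enabled)
    {
        hyp_was_enabled = hyp_enabled;   /* remember state across suspend */
    }

    static void pm_exit(void)
    {
        if (hyp_was_enabled)
            printf("re-enabling EL2 after resume\n");
    }

    int main(void)
    {
        pm_enter(true);
        pm_exit();
        return 0;
    }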
1531 * Register CPU lower-power notifier in init_subsystems()
1543 case -ENODEV: in init_subsystems()
1544 case -ENXIO: in init_subsystems()
1570 int cpu; in teardown_hyp_mode() local
1573 for_each_possible_cpu(cpu) { in teardown_hyp_mode()
1574 free_page(per_cpu(kvm_arm_hyp_stack_page, cpu)); in teardown_hyp_mode()
1575 free_pages(kvm_arm_hyp_percpu_base[cpu], nvhe_percpu_order()); in teardown_hyp_mode()
1580 * Inits Hyp-mode on all online CPUs
1584 int cpu; in init_hyp_mode() local
1595 * Allocate stack pages for Hypervisor-mode in init_hyp_mode()
1597 for_each_possible_cpu(cpu) { in init_hyp_mode()
1602 err = -ENOMEM; in init_hyp_mode()
1606 per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page; in init_hyp_mode()
1610 * Allocate and initialize pages for Hypervisor-mode percpu regions. in init_hyp_mode()
1612 for_each_possible_cpu(cpu) { in init_hyp_mode()
1618 err = -ENOMEM; in init_hyp_mode()
1624 kvm_arm_hyp_percpu_base[cpu] = (unsigned long)page_addr; in init_hyp_mode()
1628 * Map the Hyp-code called directly from the host in init_hyp_mode()
1633 kvm_err("Cannot map world-switch code\n"); in init_hyp_mode()
1653 kvm_err("Cannot map vectors\n"); in init_hyp_mode()
1660 for_each_possible_cpu(cpu) { in init_hyp_mode()
1661 char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu); in init_hyp_mode()
1674 for_each_possible_cpu(cpu) { in init_hyp_mode()
1675 char *percpu_begin = (char *)kvm_arm_hyp_percpu_base[cpu]; in init_hyp_mode()
1723 return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq, in kvm_arch_irq_bypass_add_producer()
1724 &irqfd->irq_entry); in kvm_arch_irq_bypass_add_producer()
1732 kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq, in kvm_arch_irq_bypass_del_producer()
1733 &irqfd->irq_entry); in kvm_arch_irq_bypass_del_producer()
1741 kvm_arm_halt_guest(irqfd->kvm); in kvm_arch_irq_bypass_stop()
1749 kvm_arm_resume_guest(irqfd->kvm); in kvm_arch_irq_bypass_start()
1753 * Initialize Hyp-mode and memory mappings on all CPUs.
1758 int ret, cpu; in kvm_arch_init() local
1763 return -ENODEV; in kvm_arch_init()
1769 kvm_pr_unimpl("CPU unsupported in non-VHE mode, not initializing\n"); in kvm_arch_init()
1770 return -ENODEV; in kvm_arch_init()
1775 kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \ in kvm_arch_init()
1778 for_each_online_cpu(cpu) { in kvm_arch_init()
1779 smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1); in kvm_arch_init()
1781 kvm_err("Error, CPU %d not supported!\n", cpu); in kvm_arch_init()
1782 return -ENODEV; in kvm_arch_init()