/virt/kvm/
eventfd.c
    36  kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)  [in kvm_arch_irqfd_allowed(), argument]
    46  struct kvm *kvm = irqfd->kvm;  [in irqfd_inject(), local]
    49  kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,  [in irqfd_inject()]
    51  kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,  [in irqfd_inject()]
    54  kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,  [in irqfd_inject()]
    67  struct kvm *kvm;  [in irqfd_resampler_ack(), local]
    73  kvm = resampler->kvm;  [in irqfd_resampler_ack()]
    75  kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,  [in irqfd_resampler_ack()]
    78  idx = srcu_read_lock(&kvm->irq_srcu);  [in irqfd_resampler_ack()]
    83  srcu_read_unlock(&kvm->irq_srcu, idx);  [in irqfd_resampler_ack()]
    [all …]
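The excerpts above contain essentially the whole irqfd injection path. A minimal sketch (field names taken from the excerpts, the workqueue plumbing omitted): an edge-triggered irqfd pulses the GSI high then low, while a resampling irqfd leaves the line asserted until the guest EOI runs irqfd_resampler_ack(), which walks the resampler list under srcu_read_lock(&kvm->irq_srcu).

```c
static void irqfd_inject_sketch(struct kvm_kernel_irqfd *irqfd)
{
        struct kvm *kvm = irqfd->kvm;

        if (!irqfd->resampler) {
                /* Edge-triggered: pulse the line high, then low. */
                kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi,
                            1, false);
                kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi,
                            0, false);
        } else {
                /* Level-triggered: stays asserted until the resampler
                 * ack (guest EOI) deasserts it and re-signals userspace. */
                kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                            irqfd->gsi, 1, false);
        }
}
```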
kvm_main.c
    158  static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
    162  __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,  [in kvm_arch_mmu_notifier_invalidate_range(), argument]
    251  bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,  [in kvm_make_vcpus_request_mask(), argument]
    260  kvm_for_each_vcpu(i, vcpu, kvm) {  [in kvm_make_vcpus_request_mask()]
    281  bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)  [in kvm_make_all_cpus_request(), argument]
    288  called = kvm_make_vcpus_request_mask(kvm, req, NULL, cpus);  [in kvm_make_all_cpus_request()]
    295  void kvm_flush_remote_tlbs(struct kvm *kvm)  [in kvm_flush_remote_tlbs(), argument]
    301  long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);  [in kvm_flush_remote_tlbs()]
    314  if (!kvm_arch_flush_remote_tlb(kvm)  [in kvm_flush_remote_tlbs()]
    315  || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))  [in kvm_flush_remote_tlbs()]
    [all …]
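The kvm_flush_remote_tlbs() hits outline KVM's generic remote TLB flush. A sketch of that logic, assuming the kvm->stat.remote_tlb_flush counter the real function bumps:

```c
void kvm_flush_remote_tlbs_sketch(struct kvm *kvm)
{
        /* Acquire pairs with the release that bumped ->tlbs_dirty, so
         * the flush observes every page-table write made before it. */
        long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);

        /* Prefer an arch-provided flush; otherwise broadcast a request
         * that every vCPU must handle before re-entering the guest. */
        if (!kvm_arch_flush_remote_tlb(kvm)
            || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
                ++kvm->stat.remote_tlb_flush;

        /* Only clear the count if no new writes raced in meanwhile. */
        cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
```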
irqchip.c
    22  int kvm_irq_map_gsi(struct kvm *kvm,  [in kvm_irq_map_gsi(), argument]
    29  irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,  [in kvm_irq_map_gsi()]
    30  lockdep_is_held(&kvm->irq_lock));  [in kvm_irq_map_gsi()]
    41  int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)  [in kvm_irq_map_chip_pin(), argument]
    45  irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);  [in kvm_irq_map_chip_pin()]
    49  int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)  [in kvm_send_userspace_msi(), argument]
    53  if (!irqchip_in_kernel(kvm) || (msi->flags & ~KVM_MSI_VALID_DEVID))  [in kvm_send_userspace_msi()]
    62  return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, false);  [in kvm_send_userspace_msi()]
    71  int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,  [in kvm_set_irq(), argument]
    83  idx = srcu_read_lock(&kvm->irq_srcu);  [in kvm_set_irq()]
    [all …]
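The kvm_send_userspace_msi() lines show how a userspace-delivered MSI is injected: a throwaway routing entry is filled from the kvm_msi payload and handed to kvm_set_msi(), without ever touching the SRCU-protected kvm->irq_routing table. A sketch assembled from those excerpts:

```c
int kvm_send_userspace_msi_sketch(struct kvm *kvm, struct kvm_msi *msi)
{
        struct kvm_kernel_irq_routing_entry route;

        if (!irqchip_in_kernel(kvm) || (msi->flags & ~KVM_MSI_VALID_DEVID))
                return -EINVAL;

        /* One-shot routing entry; it never enters kvm->irq_routing. */
        route.msi.address_lo = msi->address_lo;
        route.msi.address_hi = msi->address_hi;
        route.msi.data = msi->data;
        route.msi.flags = msi->flags;
        route.msi.devid = msi->devid;

        return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, false);
}
```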
coalesced_mmio.c
    54  ring = dev->kvm->coalesced_mmio_ring;  [in coalesced_mmio_has_room()]
    69  struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;  [in coalesced_mmio_write()]
    75  spin_lock(&dev->kvm->ring_lock);  [in coalesced_mmio_write()]
    80  spin_unlock(&dev->kvm->ring_lock);  [in coalesced_mmio_write()]
    92  spin_unlock(&dev->kvm->ring_lock);  [in coalesced_mmio_write()]
    110  int kvm_coalesced_mmio_init(struct kvm *kvm)  [in kvm_coalesced_mmio_init(), argument]
    121  kvm->coalesced_mmio_ring = page_address(page);  [in kvm_coalesced_mmio_init()]
    128  spin_lock_init(&kvm->ring_lock);  [in kvm_coalesced_mmio_init()]
    129  INIT_LIST_HEAD(&kvm->coalesced_zones);  [in kvm_coalesced_mmio_init()]
    135  void kvm_coalesced_mmio_free(struct kvm *kvm)  [in kvm_coalesced_mmio_free(), argument]
    [all …]
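The coalesced_mmio_write() hits sketch the fast path: guest writes are queued into a ring shared with userspace under kvm->ring_lock, so the exit to userspace is deferred until the ring is drained. A simplified sketch; the insert-index argument to coalesced_mmio_has_room() is an assumption about its exact signature:

```c
static int coalesced_mmio_write_sketch(struct kvm_coalesced_mmio_dev *dev,
                                       gpa_t addr, int len, const void *val)
{
        struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
        __u32 insert;

        spin_lock(&dev->kvm->ring_lock);

        insert = READ_ONCE(ring->last);
        if (!coalesced_mmio_has_room(dev, insert)) {
                spin_unlock(&dev->kvm->ring_lock);
                return -EOPNOTSUPP;     /* fall back to a normal MMIO exit */
        }

        ring->coalesced_mmio[insert].phys_addr = addr;
        ring->coalesced_mmio[insert].len = len;
        memcpy(ring->coalesced_mmio[insert].data, val, len);
        smp_wmb();                      /* publish the entry before ->last */
        ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;

        spin_unlock(&dev->kvm->ring_lock);
        return 0;
}
```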
coalesced_mmio.h
    21  struct kvm *kvm;  [member]
    25  int kvm_coalesced_mmio_init(struct kvm *kvm);
    26  void kvm_coalesced_mmio_free(struct kvm *kvm);
    27  int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
    29  int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
    34  static inline int kvm_coalesced_mmio_init(struct kvm *kvm) { return 0; }  [in kvm_coalesced_mmio_init(), argument]
    35  static inline void kvm_coalesced_mmio_free(struct kvm *kvm) { }  [in kvm_coalesced_mmio_free(), argument]
vfio.c
    80  static void kvm_vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)  [in kvm_vfio_group_set_kvm(), argument]
    82  void (*fn)(struct vfio_group *, struct kvm *);  [in kvm_vfio_group_set_kvm()]
    88  fn(group, kvm);  [in kvm_vfio_group_set_kvm()]
    137  static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,  [in kvm_spapr_tce_release_vfio_group(), argument]
    145  kvm_spapr_tce_release_iommu_group(kvm, grp);  [in kvm_spapr_tce_release_vfio_group()]
    176  kvm_arch_register_noncoherent_dma(dev->kvm);  [in kvm_vfio_update_coherency()]
    178  kvm_arch_unregister_noncoherent_dma(dev->kvm);  [in kvm_vfio_update_coherency()]
    229  kvm_arch_start_assignment(dev->kvm);  [in kvm_vfio_set_group()]
    233  kvm_vfio_group_set_kvm(vfio_group, dev->kvm);  [in kvm_vfio_set_group()]
    257  kvm_arch_end_assignment(dev->kvm);  [in kvm_vfio_set_group()]
    [all …]
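kvm_vfio_group_set_kvm() shows the loose coupling between the kvm and vfio modules: the VFIO entry point is resolved at runtime with symbol_get(), so neither module carries a hard link-time dependency on the other. A sketch following the excerpts:

```c
static void kvm_vfio_group_set_kvm_sketch(struct vfio_group *group,
                                          struct kvm *kvm)
{
        void (*fn)(struct vfio_group *, struct kvm *);

        /* Resolve the symbol at runtime; this pins the vfio module. */
        fn = symbol_get(vfio_group_set_kvm);
        if (!fn)
                return;         /* vfio not loaded: nothing to do */

        fn(group, kvm);

        symbol_put(vfio_group_set_kvm);
}
```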
/virt/kvm/arm/vgic/
vgic-init.c
    52  void kvm_vgic_early_init(struct kvm *kvm)  [in kvm_vgic_early_init(), argument]
    54  struct vgic_dist *dist = &kvm->arch.vgic;  [in kvm_vgic_early_init()]
    71  int kvm_vgic_create(struct kvm *kvm, u32 type)  [in kvm_vgic_create(), argument]
    76  if (irqchip_in_kernel(kvm))  [in kvm_vgic_create()]
    95  kvm_for_each_vcpu(i, vcpu, kvm) {  [in kvm_vgic_create()]
    101  kvm_for_each_vcpu(i, vcpu, kvm) {  [in kvm_vgic_create()]
    108  kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS;  [in kvm_vgic_create()]
    110  kvm->arch.max_vcpus = VGIC_V3_MAX_CPUS;  [in kvm_vgic_create()]
    112  if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus) {  [in kvm_vgic_create()]
    117  kvm->arch.vgic.in_kernel = true;  [in kvm_vgic_create()]
    [all …]
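A condensed sketch of the checks kvm_vgic_create() performs before committing to a distributor model, per the excerpts (vCPU locking and device registration elided): only one in-kernel irqchip per VM, and the vCPUs already created must fit the chosen model.

```c
int kvm_vgic_create_sketch(struct kvm *kvm, u32 type)
{
        if (irqchip_in_kernel(kvm))
                return -EEXIST;

        if (type == KVM_DEV_TYPE_ARM_VGIC_V2)
                kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS;
        else
                kvm->arch.max_vcpus = VGIC_V3_MAX_CPUS;

        if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus)
                return -E2BIG;

        kvm->arch.vgic.in_kernel = true;
        return 0;
}
```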
vgic.h
    162  struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
    164  void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq);
    165  void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq);
    169  bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
    171  void vgic_kick_vcpus(struct kvm *kvm);
    173  int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
    190  int vgic_v2_map_resources(struct kvm *kvm);
    191  int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
    219  int vgic_v3_map_resources(struct kvm *kvm);
    220  int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq);
    [all …]
vgic-its.c
    29  static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
    39  static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,  [in vgic_add_lpi(), argument]
    42  struct vgic_dist *dist = &kvm->arch.vgic;  [in vgic_add_lpi()]
    43  struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;  [in vgic_add_lpi()]
    103  ret = update_lpi_config(kvm, irq, NULL, false);  [in vgic_add_lpi()]
    105  vgic_put_irq(kvm, irq);  [in vgic_add_lpi()]
    109  ret = vgic_v3_lpi_sync_pending_status(kvm, irq);  [in vgic_add_lpi()]
    111  vgic_put_irq(kvm, irq);  [in vgic_add_lpi()]
    280  static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,  [in update_lpi_config(), argument]
    283  u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);  [in update_lpi_config()]
    [all …]
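update_lpi_config() re-reads an LPI's one-byte configuration (priority plus enable bit) from the guest property table addressed by GICR_PROPBASER. A sketch; the kvm_read_guest() accessor and the LPI_PROP_* decode macros are assumptions based on the surrounding ITS code, and the real path updates the fields under irq->irq_lock:

```c
static int update_lpi_config_sketch(struct kvm *kvm, struct vgic_irq *irq)
{
        u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
        u8 prop;
        int ret;

        /* One byte per LPI, indexed from GIC_LPI_OFFSET (8192). */
        ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
                             &prop, 1);
        if (ret)
                return ret;

        /* Decode priority and enable bit (macro names assumed). */
        irq->priority = LPI_PROP_PRIORITY(prop);
        irq->enabled = LPI_PROP_ENABLE_BIT(prop);

        return 0;
}
```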
vgic-debug.c
    55  static void iter_init(struct kvm *kvm, struct vgic_state_iter *iter,  [in iter_init(), argument]
    58  int nr_cpus = atomic_read(&kvm->online_vcpus);  [in iter_init()]
    63  iter->nr_spis = kvm->arch.vgic.nr_spis;  [in iter_init()]
    64  if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {  [in iter_init()]
    65  iter->nr_lpis = vgic_copy_lpi_list(kvm, NULL, &iter->lpi_array);  [in iter_init()]
    85  struct kvm *kvm = (struct kvm *)s->private;  [in vgic_debug_start(), local]
    88  mutex_lock(&kvm->lock);  [in vgic_debug_start()]
    89  iter = kvm->arch.vgic.iter;  [in vgic_debug_start()]
    101  iter_init(kvm, iter, *pos);  [in vgic_debug_start()]
    102  kvm->arch.vgic.iter = iter;  [in vgic_debug_start()]
    [all …]
vgic-kvm-device.c
    17  int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,  [in vgic_check_ioaddr(), argument]
    20  if (addr & ~kvm_phys_mask(kvm))  [in vgic_check_ioaddr()]
    32  static int vgic_check_type(struct kvm *kvm, int type_needed)  [in vgic_check_type(), argument]
    34  if (kvm->arch.vgic.vgic_model != type_needed)  [in vgic_check_type()]
    56  int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)  [in kvm_vgic_addr(), argument]
    59  struct vgic_dist *vgic = &kvm->arch.vgic;  [in kvm_vgic_addr()]
    63  mutex_lock(&kvm->lock);  [in kvm_vgic_addr()]
    66  r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);  [in kvm_vgic_addr()]
    71  r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);  [in kvm_vgic_addr()]
    76  r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);  [in kvm_vgic_addr()]
    [all …]
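A sketch of the validation behind vgic_check_ioaddr(): a candidate MMIO base must lie inside the guest physical address space, be suitably aligned, and not already be configured. The IS_VGIC_ADDR_UNDEF() check is an assumption based on vgic conventions:

```c
int vgic_check_ioaddr_sketch(struct kvm *kvm, phys_addr_t *ioaddr,
                             phys_addr_t addr, phys_addr_t alignment)
{
        if (addr & ~kvm_phys_mask(kvm))
                return -E2BIG;          /* outside the guest PA space */

        if (!IS_ALIGNED(addr, alignment))
                return -EINVAL;

        if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
                return -EEXIST;         /* base already configured */

        return 0;
}
```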
vgic-v4.c
    104  int vgic_v4_init(struct kvm *kvm)  [in vgic_v4_init(), argument]
    106  struct vgic_dist *dist = &kvm->arch.vgic;  [in vgic_v4_init()]
    116  nr_vcpus = atomic_read(&kvm->online_vcpus);  [in vgic_v4_init()]
    125  kvm_for_each_vcpu(i, vcpu, kvm)  [in vgic_v4_init()]
    137  kvm_for_each_vcpu(i, vcpu, kvm) {  [in vgic_v4_init()]
    162  vgic_v4_teardown(kvm);  [in vgic_v4_init()]
    173  void vgic_v4_teardown(struct kvm *kvm)  [in vgic_v4_teardown(), argument]
    175  struct its_vm *its_vm = &kvm->arch.vgic.its_vm;  [in vgic_v4_teardown()]
    182  struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, i);  [in vgic_v4_teardown()]
    197  if (!vgic_supports_direct_msis(vcpu->kvm))  [in vgic_v4_sync_hwstate()]
    [all …]
vgic-mmio.c
    51  struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);  [in vgic_mmio_read_group()]
    56  vgic_put_irq(vcpu->kvm, irq);  [in vgic_mmio_read_group()]
    70  struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);  [in vgic_mmio_write_group()]
    74  vgic_queue_irq_unlock(vcpu->kvm, irq, flags);  [in vgic_mmio_write_group()]
    76  vgic_put_irq(vcpu->kvm, irq);  [in vgic_mmio_write_group()]
    93  struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);  [in vgic_mmio_read_enable()]
    98  vgic_put_irq(vcpu->kvm, irq);  [in vgic_mmio_read_enable()]
    113  struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);  [in vgic_mmio_write_senable()]
    133  vgic_queue_irq_unlock(vcpu->kvm, irq, flags);  [in vgic_mmio_write_senable()]
    135  vgic_put_irq(vcpu->kvm, irq);  [in vgic_mmio_write_senable()]
    [all …]
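Nearly every handler in vgic-mmio.c follows the per-bit idiom visible above: one register bit per interrupt, with a vgic_get_irq()/vgic_put_irq() reference pair around each update, and vgic_queue_irq_unlock() both queuing the interrupt and dropping the per-IRQ lock. A sketch modeled on the set-enable write path:

```c
static void vgic_mmio_write_bits_sketch(struct kvm_vcpu *vcpu, u32 intid,
                                        unsigned int len, unsigned long val)
{
        int i;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                unsigned long flags;

                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                irq->enabled = true;
                /* May need to be queued now; this also drops irq_lock. */
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}
```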
vgic-irqfd.c
    19  struct kvm *kvm, int irq_source_id,  [in vgic_irqfd_set_irq(), argument]
    24  if (!vgic_valid_spi(kvm, spi_id))  [in vgic_irqfd_set_irq()]
    26  return kvm_vgic_inject_irq(kvm, 0, spi_id, level, NULL);  [in vgic_irqfd_set_irq()]
    38  int kvm_set_routing_entry(struct kvm *kvm,  [in kvm_set_routing_entry(), argument]
    86  struct kvm *kvm, int irq_source_id,  [in kvm_set_msi(), argument]
    91  if (!vgic_has_its(kvm))  [in kvm_set_msi()]
    98  return vgic_its_inject_msi(kvm, &msi);  [in kvm_set_msi()]
    107  struct kvm *kvm, int irq_source_id, int level,  [in kvm_arch_set_irq_inatomic(), argument]
    110  if (e->type == KVM_IRQ_ROUTING_MSI && vgic_has_its(kvm) && level) {  [in kvm_arch_set_irq_inatomic()]
    114  if (!vgic_its_inject_cached_translation(kvm, &msi))  [in kvm_arch_set_irq_inatomic()]
    [all …]
vgic.c
    58  static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)  [in vgic_get_lpi(), argument]
    60  struct vgic_dist *dist = &kvm->arch.vgic;  [in vgic_get_lpi()]
    90  struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,  [in vgic_get_irq(), argument]
    100  if (intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {  [in vgic_get_irq()]
    101  intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS);  [in vgic_get_irq()]
    102  return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];  [in vgic_get_irq()]
    107  return vgic_get_lpi(kvm, intid);  [in vgic_get_irq()]
    125  void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq)  [in __vgic_put_lpi_locked(), argument]
    127  struct vgic_dist *dist = &kvm->arch.vgic;  [in __vgic_put_lpi_locked()]
    138  void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)  [in vgic_put_irq(), argument]
    [all …]
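The vgic_get_irq() lines show the three-way INTID dispatch: private interrupts (SGIs/PPIs) live per-vCPU, SPIs in the distributor array, and anything at or above the LPI base goes through the refcounted LPI list. array_index_nospec() clamps each index against speculative out-of-bounds reads. A sketch using the constants from the vgic headers:

```c
struct vgic_irq *vgic_get_irq_sketch(struct kvm *kvm, struct kvm_vcpu *vcpu,
                                     u32 intid)
{
        /* SGIs and PPIs: per-vCPU storage. */
        if (intid <= VGIC_MAX_PRIVATE) {
                intid = array_index_nospec(intid, VGIC_MAX_PRIVATE + 1);
                return &vcpu->arch.vgic_cpu.private_irqs[intid];
        }

        /* SPIs: distributor array. */
        if (intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
                intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis +
                                           VGIC_NR_PRIVATE_IRQS);
                return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
        }

        /* LPIs: looked up (and refcounted) in the LPI list. */
        if (intid >= VGIC_MIN_LPI)
                return vgic_get_lpi(kvm, intid);

        return NULL;    /* reserved INTID range */
}
```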
vgic-mmio-v3.c
    39  bool vgic_has_its(struct kvm *kvm)  [in vgic_has_its(), argument]
    41  struct vgic_dist *dist = &kvm->arch.vgic;  [in vgic_has_its()]
    49  bool vgic_supports_direct_msis(struct kvm *kvm)  [in vgic_supports_direct_msis(), argument]
    51  return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm);  [in vgic_supports_direct_msis()]
    64  struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;  [in vgic_mmio_read_v3_misc()]
    76  if (vgic_has_its(vcpu->kvm)) {  [in vgic_mmio_read_v3_misc()]
    99  struct vgic_dist *dist = &vcpu->kvm->arch.vgic;  [in vgic_mmio_write_v3_misc()]
    107  vgic_kick_vcpus(vcpu->kvm);  [in vgic_mmio_write_v3_misc()]
    133  struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);  [in vgic_mmio_read_irouter()]
    143  vgic_put_irq(vcpu->kvm, irq);  [in vgic_mmio_read_irouter()]
    [all …]
vgic-v3.c
    35  u32 model = vcpu->kvm->arch.vgic.vgic_model;  [in vgic_v3_fold_lr_state()]
    59  if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))  [in vgic_v3_fold_lr_state()]
    60  kvm_notify_acked_irq(vcpu->kvm, 0,  [in vgic_v3_fold_lr_state()]
    63  irq = vgic_get_irq(vcpu->kvm, vcpu, intid);  [in vgic_v3_fold_lr_state()]
    111  vgic_put_irq(vcpu->kvm, irq);  [in vgic_v3_fold_lr_state()]
    120  u32 model = vcpu->kvm->arch.vgic.vgic_model;  [in vgic_v3_populate_lr()]
    208  u32 model = vcpu->kvm->arch.vgic.vgic_model;  [in vgic_v3_set_vmcr()]
    238  u32 model = vcpu->kvm->arch.vgic.vgic_model;  [in vgic_v3_get_vmcr()]
    288  if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {  [in vgic_v3_enable()]
    314  int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)  [in vgic_v3_lpi_sync_pending_status(), argument]
    [all …]
vgic-mmio-v2.c
    28  struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;  [in vgic_mmio_read_v2_misc()]
    38  value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;  [in vgic_mmio_read_v2_misc()]
    56  struct vgic_dist *dist = &vcpu->kvm->arch.vgic;  [in vgic_mmio_write_v2_misc()]
    63  vgic_kick_vcpus(vcpu->kvm);  [in vgic_mmio_write_v2_misc()]
    90  vcpu->kvm->arch.vgic.v2_groups_user_writable = true;  [in vgic_mmio_uaccess_write_v2_misc()]
    102  if (vcpu->kvm->arch.vgic.v2_groups_user_writable)  [in vgic_mmio_uaccess_write_v2_group()]
    112  int nr_vcpus = atomic_read(&source_vcpu->kvm->online_vcpus);  [in vgic_mmio_write_sgir()]
    134  kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {  [in vgic_mmio_write_sgir()]
    140  irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);  [in vgic_mmio_write_sgir()]
    146  vgic_queue_irq_unlock(source_vcpu->kvm, irq, flags);  [in vgic_mmio_write_sgir()]
    [all …]
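The vgic_mmio_write_sgir() hits outline the GICv2 SGI fan-out: the written target mask is walked with kvm_for_each_vcpu(), and the SGI becomes pending on each matching vCPU with the source CPU recorded so the target can report it on IAR. A simplified sketch that takes the already-decoded SGI number and target mask as parameters:

```c
static void vgic_mmio_write_sgir_sketch(struct kvm_vcpu *source_vcpu,
                                        u32 intid, unsigned long targets)
{
        struct kvm_vcpu *vcpu;
        int c;

        kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
                struct vgic_irq *irq;
                unsigned long flags;

                if (!(targets & (1U << c)))
                        continue;

                irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);

                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                irq->pending_latch = true;
                irq->source |= 1U << source_vcpu->vcpu_id;

                vgic_queue_irq_unlock(source_vcpu->kvm, irq, flags);
                vgic_put_irq(source_vcpu->kvm, irq);
        }
}
```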
vgic-mmio.h
    16  unsigned long (*its_read)(struct kvm *kvm, struct vgic_its *its,
    22  void (*its_write)(struct kvm *kvm, struct vgic_its *its,
    31  int (*uaccess_its_write)(struct kvm *kvm, struct vgic_its *its,
    101  int kvm_vgic_register_mmio_region(struct kvm *kvm, struct kvm_vcpu *vcpu,
vgic-v2.c
    70  if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))  [in vgic_v2_fold_lr_state()]
    71  kvm_notify_acked_irq(vcpu->kvm, 0,  [in vgic_v2_fold_lr_state()]
    74  irq = vgic_get_irq(vcpu->kvm, vcpu, intid);  [in vgic_v2_fold_lr_state()]
    120  vgic_put_irq(vcpu->kvm, irq);  [in vgic_v2_fold_lr_state()]
    304  int vgic_v2_map_resources(struct kvm *kvm)  [in vgic_v2_map_resources(), argument]
    306  struct vgic_dist *dist = &kvm->arch.vgic;  [in vgic_v2_map_resources()]
    309  if (vgic_ready(kvm))  [in vgic_v2_map_resources()]
    329  ret = vgic_init(kvm);  [in vgic_v2_map_resources()]
    335  ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);  [in vgic_v2_map_resources()]
    342  ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,  [in vgic_v2_map_resources()]
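A condensed sketch of vgic_v2_map_resources() built from the excerpts (base-address validation and the GICV trap case elided): nothing is mapped twice thanks to the vgic_ready() short-circuit, the vgic is fully initialised first, then the distributor MMIO device is registered and the hardware GICV CPU-interface page is mapped straight into the guest at the configured base.

```c
int vgic_v2_map_resources_sketch(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        int ret;

        if (vgic_ready(kvm))
                return 0;

        ret = vgic_init(kvm);
        if (ret)
                return ret;

        ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
        if (ret)
                return ret;

        /* Map the host GICV page into the guest's CPU-interface slot. */
        return kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
                                     kvm_vgic_global_state.vcpu_base,
                                     KVM_VGIC_V2_CPU_SIZE, true);
}
```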
/virt/kvm/arm/
mmu.c
    57  void kvm_flush_remote_tlbs(struct kvm *kvm)  [in kvm_flush_remote_tlbs(), argument]
    59  kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);  [in kvm_flush_remote_tlbs()]
    62  static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)  [in kvm_tlb_flush_vmid_ipa(), argument]
    64  kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);  [in kvm_tlb_flush_vmid_ipa()]
    100  static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)  [in stage2_dissolve_pmd(), argument]
    106  kvm_tlb_flush_vmid_ipa(kvm, addr);  [in stage2_dissolve_pmd()]
    118  static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)  [in stage2_dissolve_pud(), argument]
    120  if (!stage2_pud_huge(kvm, *pudp))  [in stage2_dissolve_pud()]
    123  stage2_pud_clear(kvm, pudp);  [in stage2_dissolve_pud()]
    124  kvm_tlb_flush_vmid_ipa(kvm, addr);  [in stage2_dissolve_pud()]
    [all …]
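stage2_dissolve_pmd() is a compact example of break-before-make on the stage-2 tables: the huge mapping is cleared, then the TLB entry for that IPA is invalidated by VMID (a hyp call, per the excerpts) before any table mapping may replace it. A sketch:

```c
static void stage2_dissolve_pmd_sketch(struct kvm *kvm, phys_addr_t addr,
                                       pmd_t *pmd)
{
        if (!pmd_thp_or_huge(*pmd))
                return;                 /* not a block mapping */

        pmd_clear(pmd);                 /* break ... */
        kvm_tlb_flush_vmid_ipa(kvm, addr);  /* ... invalidate by IPA ... */
        put_page(virt_to_page(pmd));    /* ... before any make step */
}
```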
arm.c
    106  int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)  [in kvm_arch_init_vm(), argument]
    110  ret = kvm_arm_setup_stage2(kvm, type);  [in kvm_arch_init_vm()]
    114  kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));  [in kvm_arch_init_vm()]
    115  if (!kvm->arch.last_vcpu_ran)  [in kvm_arch_init_vm()]
    119  *per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;  [in kvm_arch_init_vm()]
    121  ret = kvm_alloc_stage2_pgd(kvm);  [in kvm_arch_init_vm()]
    125  ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);  [in kvm_arch_init_vm()]
    129  kvm_vgic_early_init(kvm);  [in kvm_arch_init_vm()]
    132  kvm->arch.vmid.vmid_gen = 0;  [in kvm_arch_init_vm()]
    135  kvm->arch.max_vcpus = vgic_present ?  [in kvm_arch_init_vm()]
    [all …]
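The kvm_arch_init_vm() hits give the VM bring-up order: stage-2 configuration, per-CPU last-vCPU tracking, the stage-2 page tables, and a hyp mapping of the kvm structure itself (note that create_hyp_mappings(kvm, kvm + 1, ...) covers exactly that one object). A sketch with the error-unwind paths elided:

```c
int kvm_arch_init_vm_sketch(struct kvm *kvm, unsigned long type)
{
        int ret, cpu;

        ret = kvm_arm_setup_stage2(kvm, type);
        if (ret)
                return ret;

        kvm->arch.last_vcpu_ran =
                alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
        if (!kvm->arch.last_vcpu_ran)
                return -ENOMEM;

        for_each_possible_cpu(cpu)
                *per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;

        ret = kvm_alloc_stage2_pgd(kvm);
        if (ret)
                return ret;             /* (cleanup elided) */

        /* Map the struct kvm itself into the hyp address space. */
        ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
        if (ret)
                return ret;             /* (cleanup elided) */

        kvm_vgic_early_init(kvm);
        kvm->arch.vmid.vmid_gen = 0;    /* force a VMID allocation */
        return 0;
}
```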
psci.c
    97  struct kvm *kvm = source_vcpu->kvm;  [in kvm_psci_vcpu_on(), local]
    105  vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);  [in kvm_psci_vcpu_on()]
    114  if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)  [in kvm_psci_vcpu_on()]
    155  struct kvm *kvm = vcpu->kvm;  [in kvm_psci_vcpu_affinity_info(), local]
    173  kvm_for_each_vcpu(i, tmp, kvm) {  [in kvm_psci_vcpu_affinity_info()]
    202  kvm_for_each_vcpu(i, tmp, vcpu->kvm)  [in kvm_prepare_system_event()]
    204  kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);  [in kvm_prepare_system_event()]
    223  struct kvm *kvm = vcpu->kvm;  [in kvm_psci_0_2_call(), local]
    246  mutex_lock(&kvm->lock);  [in kvm_psci_0_2_call()]
    248  mutex_unlock(&kvm->lock);  [in kvm_psci_0_2_call()]
    [all …]
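kvm_psci_vcpu_on() resolves its cpu_id argument as an MPIDR affinity value via kvm_mpidr_to_vcpu(); the version check in the excerpt exists because PSCI 0.1 has no ALREADY_ON error code. A sketch; the power_off field and PSCI_RET_* return values are assumptions based on the arm KVM PSCI code:

```c
static long kvm_psci_vcpu_on_sketch(struct kvm_vcpu *source_vcpu,
                                    unsigned long cpu_id)
{
        struct kvm *kvm = source_vcpu->kvm;
        struct kvm_vcpu *vcpu;

        /* cpu_id is an MPIDR affinity value, not a vcpu index. */
        vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);
        if (!vcpu)
                return PSCI_RET_INVALID_PARAMS;

        if (!vcpu->arch.power_off) {
                /* PSCI 0.1 predates the ALREADY_ON error code. */
                if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
                        return PSCI_RET_ALREADY_ON;
                return PSCI_RET_INVALID_PARAMS;
        }

        /* ... set the target's entry point and context ID, wake it ... */
        return PSCI_RET_SUCCESS;
}
```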
arch_timer.c
    74  static inline bool userspace_irqchip(struct kvm *kvm)  [in userspace_irqchip(), argument]
    77  unlikely(!irqchip_in_kernel(kvm));  [in userspace_irqchip()]
    116  if (userspace_irqchip(vcpu->kvm) &&  [in kvm_arch_timer_handler()]
    303  if (!userspace_irqchip(vcpu->kvm)) {  [in kvm_timer_update_irq()]
    304  ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,  [in kvm_timer_update_irq()]
    487  if (irqchip_in_kernel(vcpu->kvm))  [in kvm_timer_vcpu_load_gic()]
    560  if (likely(irqchip_in_kernel(vcpu->kvm)))  [in kvm_timer_should_notify_user()]
    634  if (unlikely(!irqchip_in_kernel(vcpu->kvm)))  [in kvm_timer_sync_hwstate()]
    658  if (irqchip_in_kernel(vcpu->kvm)) {  [in kvm_timer_vcpu_reset()]
    675  struct kvm *kvm = vcpu->kvm;  [in update_vtimer_cntvoff(), local]
    [all …]
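userspace_irqchip() gates every timer interrupt delivery in this file: with an in-kernel vgic the line is injected directly, otherwise the level is only recorded so userspace can pick it up on the next exit. A sketch of the update path (the trace call is omitted):

```c
static void kvm_timer_update_irq_sketch(struct kvm_vcpu *vcpu, bool level,
                                        struct arch_timer_context *timer_ctx)
{
        timer_ctx->irq.level = level;

        if (!userspace_irqchip(vcpu->kvm)) {
                /* In-kernel vgic: inject directly; the timer context is
                 * passed as the owner cookie. */
                int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
                                              timer_ctx->irq.irq,
                                              timer_ctx->irq.level,
                                              timer_ctx);
                WARN_ON(ret);
        }
        /* Userspace irqchip: the recorded level is reported on exit. */
}
```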
pmu.c
    382  if (likely(irqchip_in_kernel(vcpu->kvm))) {  [in kvm_pmu_update_state()]
    383  int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,  [in kvm_pmu_update_state()]
    395  if (likely(irqchip_in_kernel(vcpu->kvm)))  [in kvm_pmu_should_notify_user()]
    709  if (irqchip_in_kernel(vcpu->kvm)) {  [in kvm_arm_pmu_v3_enable()]
    720  if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))  [in kvm_arm_pmu_v3_enable()]
    743  if (irqchip_in_kernel(vcpu->kvm)) {  [in kvm_arm_pmu_v3_init()]
    751  if (!vgic_initialized(vcpu->kvm))  [in kvm_arm_pmu_v3_init()]
    772  static bool pmu_irq_is_valid(struct kvm *kvm, int irq)  [in pmu_irq_is_valid(), argument]
    777  kvm_for_each_vcpu(i, vcpu, kvm) {  [in pmu_irq_is_valid()]
    800  if (!irqchip_in_kernel(vcpu->kvm))  [in kvm_arm_pmu_v3_set_attr()]
    [all …]
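pmu_irq_is_valid() enforces a cross-vCPU consistency rule: a PMU overflow interrupt that is a PPI must be the same number on every vCPU (each vCPU has its own instance of it), while an SPI must be unique to exactly one vCPU. A sketch; kvm_arm_pmu_irq_initialized() and the irq_num field are assumptions based on the arm PMU code:

```c
static bool pmu_irq_is_valid_sketch(struct kvm *kvm, int irq)
{
        struct kvm_vcpu *vcpu;
        int i;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        continue;

                if (irq_is_ppi(irq)) {
                        /* PPIs must match on every vCPU. */
                        if (vcpu->arch.pmu.irq_num != irq)
                                return false;
                } else if (vcpu->arch.pmu.irq_num == irq) {
                        /* SPIs must not be shared between vCPUs. */
                        return false;
                }
        }

        return true;
}
```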