Lines matching full:irq in the KVM vGIC core (vgic.c)
7 #include <linux/irq.h>
56 * matching interrupt ID and return a reference to the IRQ structure.
61 struct vgic_irq *irq = NULL; in vgic_get_lpi() local
66 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { in vgic_get_lpi()
67 if (irq->intid != intid) in vgic_get_lpi()
72 * call vgic_put_irq() later once it's finished with the IRQ. in vgic_get_lpi()
74 vgic_get_irq_kref(irq); in vgic_get_lpi()
77 irq = NULL; in vgic_get_lpi()
82 return irq; in vgic_get_lpi()
88 * to call vgic_put_irq() once it's finished with this IRQ.
125 void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq) in __vgic_put_lpi_locked() argument
129 if (!kref_put(&irq->refcount, vgic_irq_release)) in __vgic_put_lpi_locked()
132 list_del(&irq->lpi_list); in __vgic_put_lpi_locked()
135 kfree(irq); in __vgic_put_lpi_locked()
138 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq) in vgic_put_irq() argument
143 if (irq->intid < VGIC_MIN_LPI) in vgic_put_irq()
147 __vgic_put_lpi_locked(kvm, irq); in vgic_put_irq()
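The comments above spell out the reference-counting contract: vgic_get_lpi()/vgic_get_irq() take a reference that the caller must drop with vgic_put_irq(), and the last put frees a dynamically allocated LPI. A minimal sketch of the usual caller pattern, assuming the signatures visible in this listing (the helper name and the pending_latch update are purely illustrative):

	static void example_latch_pending(struct kvm *kvm, struct kvm_vcpu *vcpu,
					  u32 intid)
	{
		struct vgic_irq *irq = vgic_get_irq(kvm, vcpu, intid); /* takes a reference */
		unsigned long flags;

		if (!irq)
			return;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = true;	/* per-IRQ state is only touched under irq_lock */
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(kvm, irq);		/* drop the reference; an LPI may be freed here */
	}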
154 struct vgic_irq *irq, *tmp; in vgic_flush_pending_lpis() local
159 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { in vgic_flush_pending_lpis()
160 if (irq->intid >= VGIC_MIN_LPI) { in vgic_flush_pending_lpis()
161 raw_spin_lock(&irq->irq_lock); in vgic_flush_pending_lpis()
162 list_del(&irq->ap_list); in vgic_flush_pending_lpis()
163 irq->vcpu = NULL; in vgic_flush_pending_lpis()
164 raw_spin_unlock(&irq->irq_lock); in vgic_flush_pending_lpis()
165 vgic_put_irq(vcpu->kvm, irq); in vgic_flush_pending_lpis()
172 void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending) in vgic_irq_set_phys_pending() argument
174 WARN_ON(irq_set_irqchip_state(irq->host_irq, in vgic_irq_set_phys_pending()
179 bool vgic_get_phys_line_level(struct vgic_irq *irq) in vgic_get_phys_line_level() argument
183 BUG_ON(!irq->hw); in vgic_get_phys_line_level()
185 if (irq->get_input_level) in vgic_get_phys_line_level()
186 return irq->get_input_level(irq->intid); in vgic_get_phys_line_level()
188 WARN_ON(irq_get_irqchip_state(irq->host_irq, in vgic_get_phys_line_level()
195 void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active) in vgic_irq_set_phys_active() argument
198 BUG_ON(!irq->hw); in vgic_irq_set_phys_active()
199 WARN_ON(irq_set_irqchip_state(irq->host_irq, in vgic_irq_set_phys_active()
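These three helpers drive the *physical* interrupt behind a hardware-mapped virtual IRQ through the genirq irqchip-state API; the listing truncates the second argument of each call. A reconstruction of the line-level helper with the IRQCHIP_STATE_* constant filled in (assumed to match this kernel version):

	#include <linux/interrupt.h>	/* irq_get_irqchip_state(), IRQCHIP_STATE_* */

	static bool example_sample_hw_line(struct vgic_irq *irq)
	{
		bool line = false;

		/* Prefer a device-provided callback when one was registered... */
		if (irq->get_input_level)
			return irq->get_input_level(irq->intid);

		/* ...otherwise ask the host irqchip for the raw line level. */
		WARN_ON(irq_get_irqchip_state(irq->host_irq,
					      IRQCHIP_STATE_LINE_LEVEL, &line));
		return line;
	}

The pending and active setters pass IRQCHIP_STATE_PENDING and IRQCHIP_STATE_ACTIVE to irq_set_irqchip_state() in the same way.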
205 * vgic_target_oracle - compute the target vcpu for an irq in vgic_target_oracle()
207 * @irq: The irq to route. Must be already locked.
213 * Requires the IRQ lock to be held.
215 static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq) in vgic_target_oracle() argument
217 lockdep_assert_held(&irq->irq_lock); in vgic_target_oracle()
220 if (irq->active) in vgic_target_oracle()
221 return irq->vcpu ? : irq->target_vcpu; in vgic_target_oracle()
224 * If the IRQ is not active but enabled and pending, we should direct in vgic_target_oracle()
229 if (irq->enabled && irq_is_pending(irq)) { in vgic_target_oracle()
230 if (unlikely(irq->target_vcpu && in vgic_target_oracle()
231 !irq->target_vcpu->kvm->arch.vgic.enabled)) in vgic_target_oracle()
234 return irq->target_vcpu; in vgic_target_oracle()
237 /* If neither active nor pending and enabled, then this IRQ should not in vgic_target_oracle()
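Stitched together, the fragments above amount to a three-way routing decision. A simplified model (not the kernel's exact code):

	/* Decide which vcpu, if any, should get this IRQ. Caller holds irq_lock. */
	static struct kvm_vcpu *oracle_model(struct vgic_irq *irq)
	{
		if (irq->active)			/* active: stay on the owning vcpu */
			return irq->vcpu ? irq->vcpu : irq->target_vcpu;

		if (irq->enabled && irq_is_pending(irq)) {
			/* Pending, but the distributor is disabled: route nowhere. */
			if (irq->target_vcpu && !irq->target_vcpu->kvm->arch.vgic.enabled)
				return NULL;
			return irq->target_vcpu;	/* deliver to the configured target */
		}

		return NULL;	/* neither active nor pending+enabled: no vcpu needed */
	}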
309 * rising edge, and in-kernel connected IRQ lines can only be controlled by
312 static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner) in vgic_validate_injection() argument
314 if (irq->owner != owner) in vgic_validate_injection()
317 switch (irq->config) { in vgic_validate_injection()
319 return irq->line_level != level; in vgic_validate_injection()
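Only two lines of this predicate survive in the listing. Based on the "rising edge" comment above, the full check most likely reads as below (a reconstruction, not a verbatim quote): the owner token must match, a level-triggered line must actually change level, and an edge-triggered line only injects on a rising edge.

	static bool validate_injection_model(struct vgic_irq *irq, bool level,
					     void *owner)
	{
		if (irq->owner != owner)		/* wrong owner: reject */
			return false;

		switch (irq->config) {
		case VGIC_CONFIG_LEVEL:
			return irq->line_level != level; /* only real level changes count */
		case VGIC_CONFIG_EDGE:
			return level;			 /* only rising edges inject */
		}

		return false;
	}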
328 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
330 * Returns true when the IRQ was queued, false otherwise.
332 * Needs to be entered with the IRQ lock already held, but will return
335 bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq, in vgic_queue_irq_unlock() argument
340 lockdep_assert_held(&irq->irq_lock); in vgic_queue_irq_unlock()
343 vcpu = vgic_target_oracle(irq); in vgic_queue_irq_unlock()
344 if (irq->vcpu || !vcpu) { in vgic_queue_irq_unlock()
346 * If this IRQ is already on a VCPU's ap_list, then it in vgic_queue_irq_unlock()
350 * Otherwise, if the irq is not pending and enabled, it does in vgic_queue_irq_unlock()
354 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_queue_irq_unlock()
360 * while the IRQ is already on the VCPU's AP list, the in vgic_queue_irq_unlock()
373 * We must unlock the irq lock to take the ap_list_lock where in vgic_queue_irq_unlock()
376 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_queue_irq_unlock()
381 raw_spin_lock(&irq->irq_lock); in vgic_queue_irq_unlock()
387 * 1) The irq lost its pending state or was disabled behind our in vgic_queue_irq_unlock()
389 * 2) Someone changed the affinity on this irq behind our in vgic_queue_irq_unlock()
395 if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) { in vgic_queue_irq_unlock()
396 raw_spin_unlock(&irq->irq_lock); in vgic_queue_irq_unlock()
400 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_queue_irq_unlock()
405 * Grab a reference to the irq to reflect the fact that it is in vgic_queue_irq_unlock()
408 vgic_get_irq_kref(irq); in vgic_queue_irq_unlock()
409 list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head); in vgic_queue_irq_unlock()
410 irq->vcpu = vcpu; in vgic_queue_irq_unlock()
412 raw_spin_unlock(&irq->irq_lock); in vgic_queue_irq_unlock()
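The fragments above hide the lock-ordering dance that makes queueing safe: ap_list_lock nests outside irq_lock, so the IRQ lock is dropped, both locks are retaken in order, and the target is re-validated before the IRQ is actually added. A condensed sketch of the whole path, assuming the field names shown in this listing (the request/kick details are trimmed to a plain kick):

	static bool queue_irq_model(struct kvm *kvm, struct vgic_irq *irq,
				    unsigned long flags)
	{
		struct kvm_vcpu *vcpu;

	retry:
		vcpu = vgic_target_oracle(irq);		/* pick a target under irq_lock */
		if (irq->vcpu || !vcpu) {
			/* Already queued somewhere, or nobody needs it right now. */
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			return false;
		}

		/* Lock order is ap_list_lock -> irq_lock, so drop and retake. */
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
		raw_spin_lock(&irq->irq_lock);

		if (irq->vcpu || vcpu != vgic_target_oracle(irq)) {
			/* Something changed while the IRQ was unlocked: start over. */
			raw_spin_unlock(&irq->irq_lock);
			raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
			raw_spin_lock_irqsave(&irq->irq_lock, flags);
			goto retry;
		}

		vgic_get_irq_kref(irq);			/* the ap_list holds its own reference */
		list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
		irq->vcpu = vcpu;

		raw_spin_unlock(&irq->irq_lock);
		raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);

		kvm_vcpu_kick(vcpu);			/* make the target vcpu notice */
		return true;
	}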
422 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
430 * @owner: The opaque pointer to the owner of the IRQ being raised to verify
431 * that the caller is allowed to inject this IRQ. Userspace
442 struct vgic_irq *irq; in kvm_vgic_inject_irq() local
456 irq = vgic_get_irq(kvm, vcpu, intid); in kvm_vgic_inject_irq()
457 if (!irq) in kvm_vgic_inject_irq()
460 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_inject_irq()
462 if (!vgic_validate_injection(irq, level, owner)) { in kvm_vgic_inject_irq()
464 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_inject_irq()
465 vgic_put_irq(kvm, irq); in kvm_vgic_inject_irq()
469 if (irq->config == VGIC_CONFIG_LEVEL) in kvm_vgic_inject_irq()
470 irq->line_level = level; in kvm_vgic_inject_irq()
472 irq->pending_latch = true; in kvm_vgic_inject_irq()
474 vgic_queue_irq_unlock(kvm, irq, flags); in kvm_vgic_inject_irq()
475 vgic_put_irq(kvm, irq); in kvm_vgic_inject_irq()
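kvm_vgic_inject_irq() is the entry point for raising a line on behalf of a device. A hypothetical level-triggered SPI example, assuming the kvm_vgic_inject_irq(kvm, cpuid, intid, level, owner) signature used by this kernel version (the helper names are illustrative):

	/* Assert and deassert a level-triggered SPI from an in-kernel device model. */
	static int example_assert_spi(struct kvm *kvm, unsigned int spi_intid, void *owner)
	{
		/* cpuid only matters for private (SGI/PPI) interrupts, so pass 0. */
		return kvm_vgic_inject_irq(kvm, 0, spi_intid, true, owner);
	}

	static int example_deassert_spi(struct kvm *kvm, unsigned int spi_intid, void *owner)
	{
		/* Lowering the line is only meaningful for level-triggered IRQs;
		 * vgic_validate_injection() filters out redundant transitions. */
		return kvm_vgic_inject_irq(kvm, 0, spi_intid, false, owner);
	}

The owner token has to match whatever was registered for that intid via kvm_vgic_set_owner() (NULL if the line is unowned), otherwise the injection is rejected.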
480 /* @irq->irq_lock must be held */
481 static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq, in kvm_vgic_map_irq() argument
489 * Find the physical IRQ number corresponding to @host_irq in kvm_vgic_map_irq()
500 irq->hw = true; in kvm_vgic_map_irq()
501 irq->host_irq = host_irq; in kvm_vgic_map_irq()
502 irq->hwintid = data->hwirq; in kvm_vgic_map_irq()
503 irq->get_input_level = get_input_level; in kvm_vgic_map_irq()
507 /* @irq->irq_lock must be held */
508 static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq) in kvm_vgic_unmap_irq() argument
510 irq->hw = false; in kvm_vgic_unmap_irq()
511 irq->hwintid = 0; in kvm_vgic_unmap_irq()
512 irq->get_input_level = NULL; in kvm_vgic_unmap_irq()
518 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); in kvm_vgic_map_phys_irq() local
522 BUG_ON(!irq); in kvm_vgic_map_phys_irq()
524 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_map_phys_irq()
525 ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level); in kvm_vgic_map_phys_irq()
526 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_map_phys_irq()
527 vgic_put_irq(vcpu->kvm, irq); in kvm_vgic_map_phys_irq()
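kvm_vgic_map_phys_irq() ties a virtual interrupt to a host interrupt so that pending/active state is forwarded to the physical irqchip (see the vgic_*_phys_* helpers earlier in this listing). A hypothetical passthrough mapping, assuming the four-argument form shown here:

	static int example_map_passthrough(struct kvm_vcpu *vcpu,
					   unsigned int host_irq, u32 vintid)
	{
		/* Passing NULL for get_input_level makes vgic_get_phys_line_level()
		 * sample the level from the host irqchip instead of a callback. */
		return kvm_vgic_map_phys_irq(vcpu, host_irq, vintid, NULL);
	}

kvm_vgic_unmap_phys_irq() undoes this by clearing irq->hw, irq->hwintid and the callback, as shown further down.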
533 * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
543 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); in kvm_vgic_reset_mapped_irq() local
546 if (!irq->hw) in kvm_vgic_reset_mapped_irq()
549 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_reset_mapped_irq()
550 irq->active = false; in kvm_vgic_reset_mapped_irq()
551 irq->pending_latch = false; in kvm_vgic_reset_mapped_irq()
552 irq->line_level = false; in kvm_vgic_reset_mapped_irq()
553 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_reset_mapped_irq()
555 vgic_put_irq(vcpu->kvm, irq); in kvm_vgic_reset_mapped_irq()
560 struct vgic_irq *irq; in kvm_vgic_unmap_phys_irq() local
566 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); in kvm_vgic_unmap_phys_irq()
567 BUG_ON(!irq); in kvm_vgic_unmap_phys_irq()
569 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_unmap_phys_irq()
570 kvm_vgic_unmap_irq(irq); in kvm_vgic_unmap_phys_irq()
571 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_unmap_phys_irq()
572 vgic_put_irq(vcpu->kvm, irq); in kvm_vgic_unmap_phys_irq()
589 struct vgic_irq *irq; in kvm_vgic_set_owner() local
600 irq = vgic_get_irq(vcpu->kvm, vcpu, intid); in kvm_vgic_set_owner()
601 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_set_owner()
602 if (irq->owner && irq->owner != owner) in kvm_vgic_set_owner()
605 irq->owner = owner; in kvm_vgic_set_owner()
606 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_set_owner()
622 struct vgic_irq *irq, *tmp; in vgic_prune_ap_list() local
629 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { in vgic_prune_ap_list()
633 raw_spin_lock(&irq->irq_lock); in vgic_prune_ap_list()
635 BUG_ON(vcpu != irq->vcpu); in vgic_prune_ap_list()
637 target_vcpu = vgic_target_oracle(irq); in vgic_prune_ap_list()
644 list_del(&irq->ap_list); in vgic_prune_ap_list()
645 irq->vcpu = NULL; in vgic_prune_ap_list()
646 raw_spin_unlock(&irq->irq_lock); in vgic_prune_ap_list()
652 * we remove the irq from the list, we drop in vgic_prune_ap_list()
655 vgic_put_irq(vcpu->kvm, irq); in vgic_prune_ap_list()
661 raw_spin_unlock(&irq->irq_lock); in vgic_prune_ap_list()
667 raw_spin_unlock(&irq->irq_lock); in vgic_prune_ap_list()
685 raw_spin_lock(&irq->irq_lock); in vgic_prune_ap_list()
696 if (target_vcpu == vgic_target_oracle(irq)) { in vgic_prune_ap_list()
699 list_del(&irq->ap_list); in vgic_prune_ap_list()
700 irq->vcpu = target_vcpu; in vgic_prune_ap_list()
701 list_add_tail(&irq->ap_list, &new_cpu->ap_list_head); in vgic_prune_ap_list()
705 raw_spin_unlock(&irq->irq_lock); in vgic_prune_ap_list()
730 struct vgic_irq *irq, int lr) in vgic_populate_lr() argument
732 lockdep_assert_held(&irq->irq_lock); in vgic_populate_lr()
735 vgic_v2_populate_lr(vcpu, irq, lr); in vgic_populate_lr()
737 vgic_v3_populate_lr(vcpu, irq, lr); in vgic_populate_lr()
761 struct vgic_irq *irq; in compute_ap_list_depth() local
768 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { in compute_ap_list_depth()
771 raw_spin_lock(&irq->irq_lock); in compute_ap_list_depth()
773 w = vgic_irq_get_lr_count(irq); in compute_ap_list_depth()
774 raw_spin_unlock(&irq->irq_lock); in compute_ap_list_depth()
786 struct vgic_irq *irq; in vgic_flush_lr_state() local
800 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { in vgic_flush_lr_state()
801 raw_spin_lock(&irq->irq_lock); in vgic_flush_lr_state()
805 * guarantee that they are all seen before any IRQ of in vgic_flush_lr_state()
810 if (multi_sgi && irq->priority > prio) { in vgic_flush_lr_state()
811 _raw_spin_unlock(&irq->irq_lock); in vgic_flush_lr_state()
815 if (likely(vgic_target_oracle(irq) == vcpu)) { in vgic_flush_lr_state()
816 vgic_populate_lr(vcpu, irq, count++); in vgic_flush_lr_state()
818 if (irq->source) in vgic_flush_lr_state()
819 prio = irq->priority; in vgic_flush_lr_state()
822 raw_spin_unlock(&irq->irq_lock); in vgic_flush_lr_state()
825 if (!list_is_last(&irq->ap_list, in vgic_flush_lr_state()
957 struct vgic_irq *irq; in kvm_vgic_vcpu_pending_irq() local
972 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { in kvm_vgic_vcpu_pending_irq()
973 raw_spin_lock(&irq->irq_lock); in kvm_vgic_vcpu_pending_irq()
974 pending = irq_is_pending(irq) && irq->enabled && in kvm_vgic_vcpu_pending_irq()
975 !irq->active && in kvm_vgic_vcpu_pending_irq()
976 irq->priority < vmcr.pmr; in kvm_vgic_vcpu_pending_irq()
977 raw_spin_unlock(&irq->irq_lock); in kvm_vgic_vcpu_pending_irq()
1007 struct vgic_irq *irq; in kvm_vgic_map_is_active() local
1014 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); in kvm_vgic_map_is_active()
1015 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_map_is_active()
1016 map_is_active = irq->hw && irq->active; in kvm_vgic_map_is_active()
1017 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_map_is_active()
1018 vgic_put_irq(vcpu->kvm, irq); in kvm_vgic_map_is_active()