Lines matching refs: kvm (virt/kvm/eventfd.c)

52 struct kvm *kvm = irqfd->kvm; in irqfd_inject() local
55 kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1, in irqfd_inject()
57 kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0, in irqfd_inject()
60 kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, in irqfd_inject()
73 struct kvm *kvm; in irqfd_resampler_ack() local
79 kvm = resampler->kvm; in irqfd_resampler_ack()
81 kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, in irqfd_resampler_ack()
84 idx = srcu_read_lock(&kvm->irq_srcu); in irqfd_resampler_ack()
89 srcu_read_unlock(&kvm->irq_srcu, idx); in irqfd_resampler_ack()
96 struct kvm *kvm = resampler->kvm; in irqfd_resampler_shutdown() local
98 mutex_lock(&kvm->irqfds.resampler_lock); in irqfd_resampler_shutdown()
101 synchronize_srcu(&kvm->irq_srcu); in irqfd_resampler_shutdown()
105 kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier); in irqfd_resampler_shutdown()
106 kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID, in irqfd_resampler_shutdown()
111 mutex_unlock(&kvm->irqfds.resampler_lock); in irqfd_resampler_shutdown()
122 struct kvm *kvm = irqfd->kvm; in irqfd_shutdown() local
126 synchronize_srcu(&kvm->irq_srcu); in irqfd_shutdown()
180 struct kvm *kvm, int irq_source_id, in kvm_arch_set_irq_inatomic() argument
197 struct kvm *kvm = irqfd->kvm; in irqfd_wakeup() local
202 idx = srcu_read_lock(&kvm->irq_srcu); in irqfd_wakeup()
208 if (kvm_arch_set_irq_inatomic(&irq, kvm, in irqfd_wakeup()
212 srcu_read_unlock(&kvm->irq_srcu, idx); in irqfd_wakeup()
219 spin_lock_irqsave(&kvm->irqfds.lock, flags); in irqfd_wakeup()
233 spin_unlock_irqrestore(&kvm->irqfds.lock, flags); in irqfd_wakeup()
249 static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd) in irqfd_update() argument
255 n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi); in irqfd_update()
280 struct kvm *kvm, unsigned int host_irq, in kvm_arch_update_irqfd_routing() argument
288 kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args) in kvm_irqfd_assign() argument
297 if (!kvm_arch_intc_initialized(kvm)) in kvm_irqfd_assign()
304 irqfd->kvm = kvm; in kvm_irqfd_assign()
337 mutex_lock(&kvm->irqfds.resampler_lock); in kvm_irqfd_assign()
340 &kvm->irqfds.resampler_list, link) { in kvm_irqfd_assign()
351 mutex_unlock(&kvm->irqfds.resampler_lock); in kvm_irqfd_assign()
355 resampler->kvm = kvm; in kvm_irqfd_assign()
361 list_add(&resampler->link, &kvm->irqfds.resampler_list); in kvm_irqfd_assign()
362 kvm_register_irq_ack_notifier(kvm, in kvm_irqfd_assign()
368 synchronize_srcu(&kvm->irq_srcu); in kvm_irqfd_assign()
370 mutex_unlock(&kvm->irqfds.resampler_lock); in kvm_irqfd_assign()
380 spin_lock_irq(&kvm->irqfds.lock); in kvm_irqfd_assign()
383 list_for_each_entry(tmp, &kvm->irqfds.items, list) { in kvm_irqfd_assign()
388 spin_unlock_irq(&kvm->irqfds.lock); in kvm_irqfd_assign()
392 idx = srcu_read_lock(&kvm->irq_srcu); in kvm_irqfd_assign()
393 irqfd_update(kvm, irqfd); in kvm_irqfd_assign()
395 list_add_tail(&irqfd->list, &kvm->irqfds.items); in kvm_irqfd_assign()
397 spin_unlock_irq(&kvm->irqfds.lock); in kvm_irqfd_assign()
420 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_irqfd_assign()
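
The kvm_irqfd_assign() hits above (lines 288-420) are the kernel side of attaching an eventfd to a guest interrupt line. For context, a minimal userspace sketch of the matching ioctl; vm_fd is assumed to come from KVM_CREATE_VM with an in-kernel irqchip, and attach_irqfd is an illustrative name, not part of the KVM API:

#include <linux/kvm.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>

int attach_irqfd(int vm_fd, unsigned int gsi)
{
        struct kvm_irqfd irqfd;
        int efd = eventfd(0, EFD_CLOEXEC);

        if (efd < 0)
                return -1;

        memset(&irqfd, 0, sizeof(irqfd));
        irqfd.fd  = efd;   /* eventfd whose write triggers injection */
        irqfd.gsi = gsi;   /* guest interrupt line to raise */

        if (ioctl(vm_fd, KVM_IRQFD, &irqfd) < 0)
                return -1;
        return efd;        /* writing a counter value here injects gsi */
}

For a level-triggered source, setting .flags = KVM_IRQFD_FLAG_RESAMPLE and filling .resamplefd with a second eventfd selects the resampler path visible in irqfd_resampler_ack() and irqfd_resampler_shutdown() above: when the guest acknowledges the interrupt, the line is de-asserted and the resamplefd is signaled so the device model can re-check its state.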
446 bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin) in kvm_irq_has_notifier() argument
451 idx = srcu_read_lock(&kvm->irq_srcu); in kvm_irq_has_notifier()
452 gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin); in kvm_irq_has_notifier()
454 hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list, in kvm_irq_has_notifier()
457 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_irq_has_notifier()
461 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_irq_has_notifier()
467 void kvm_notify_acked_gsi(struct kvm *kvm, int gsi) in kvm_notify_acked_gsi() argument
471 hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list, in kvm_notify_acked_gsi()
477 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin) in kvm_notify_acked_irq() argument
483 idx = srcu_read_lock(&kvm->irq_srcu); in kvm_notify_acked_irq()
484 gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin); in kvm_notify_acked_irq()
486 kvm_notify_acked_gsi(kvm, gsi); in kvm_notify_acked_irq()
487 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_notify_acked_irq()
490 void kvm_register_irq_ack_notifier(struct kvm *kvm, in kvm_register_irq_ack_notifier() argument
493 mutex_lock(&kvm->irq_lock); in kvm_register_irq_ack_notifier()
494 hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list); in kvm_register_irq_ack_notifier()
495 mutex_unlock(&kvm->irq_lock); in kvm_register_irq_ack_notifier()
496 kvm_vcpu_request_scan_ioapic(kvm); in kvm_register_irq_ack_notifier()
499 void kvm_unregister_irq_ack_notifier(struct kvm *kvm, in kvm_unregister_irq_ack_notifier() argument
502 mutex_lock(&kvm->irq_lock); in kvm_unregister_irq_ack_notifier()
504 mutex_unlock(&kvm->irq_lock); in kvm_unregister_irq_ack_notifier()
505 synchronize_srcu(&kvm->irq_srcu); in kvm_unregister_irq_ack_notifier()
506 kvm_vcpu_request_scan_ioapic(kvm); in kvm_unregister_irq_ack_notifier()
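
kvm_register_irq_ack_notifier() and kvm_unregister_irq_ack_notifier() (lines 490-506) are the same hooks the irqfd resampler uses at line 362. A hedged in-kernel sketch of a client, assuming only the struct kvm_irq_ack_notifier layout from kvm_host.h; my_dev and my_irq_acked are hypothetical names:

#include <linux/kvm_host.h>

struct my_dev {
        struct kvm_irq_ack_notifier notifier;
};

static void my_irq_acked(struct kvm_irq_ack_notifier *kian)
{
        struct my_dev *dev = container_of(kian, struct my_dev, notifier);

        /* The guest acknowledged the GSI; safe to de-assert or re-arm. */
        (void)dev;
}

static void my_dev_register(struct kvm *kvm, struct my_dev *dev, int gsi)
{
        dev->notifier.gsi = gsi;
        dev->notifier.irq_acked = my_irq_acked;
        kvm_register_irq_ack_notifier(kvm, &dev->notifier);
}

The callback runs from kvm_notify_acked_gsi() under SRCU (lines 467-471), which is why unregistration pairs the list removal with synchronize_srcu() at line 505 before the notifier's memory may be reused.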
511 kvm_eventfd_init(struct kvm *kvm) in kvm_eventfd_init() argument
514 spin_lock_init(&kvm->irqfds.lock); in kvm_eventfd_init()
515 INIT_LIST_HEAD(&kvm->irqfds.items); in kvm_eventfd_init()
516 INIT_LIST_HEAD(&kvm->irqfds.resampler_list); in kvm_eventfd_init()
517 mutex_init(&kvm->irqfds.resampler_lock); in kvm_eventfd_init()
519 INIT_LIST_HEAD(&kvm->ioeventfds); in kvm_eventfd_init()
527 kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args) in kvm_irqfd_deassign() argument
536 spin_lock_irq(&kvm->irqfds.lock); in kvm_irqfd_deassign()
538 list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) { in kvm_irqfd_deassign()
553 spin_unlock_irq(&kvm->irqfds.lock); in kvm_irqfd_deassign()
567 kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) in kvm_irqfd() argument
573 return kvm_irqfd_deassign(kvm, args); in kvm_irqfd()
575 return kvm_irqfd_assign(kvm, args); in kvm_irqfd()
583 kvm_irqfd_release(struct kvm *kvm) in kvm_irqfd_release() argument
587 spin_lock_irq(&kvm->irqfds.lock); in kvm_irqfd_release()
589 list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) in kvm_irqfd_release()
592 spin_unlock_irq(&kvm->irqfds.lock); in kvm_irqfd_release()
606 void kvm_irq_routing_update(struct kvm *kvm) in kvm_irq_routing_update() argument
610 spin_lock_irq(&kvm->irqfds.lock); in kvm_irq_routing_update()
612 list_for_each_entry(irqfd, &kvm->irqfds.items, list) { in kvm_irq_routing_update()
613 irqfd_update(kvm, irqfd); in kvm_irq_routing_update()
618 irqfd->kvm, irqfd->producer->irq, in kvm_irq_routing_update()
625 spin_unlock_irq(&kvm->irqfds.lock); in kvm_irq_routing_update()
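
kvm_irqfd() at line 567 dispatches on args->flags: KVM_IRQFD_FLAG_DEASSIGN goes to kvm_irqfd_deassign() (line 573), everything else to kvm_irqfd_assign() (line 575). Userspace detaches with the same ioctl and the flag set; a sketch reusing the names and includes from the attach example above:

static int detach_irqfd(int vm_fd, int efd, unsigned int gsi)
{
        struct kvm_irqfd irqfd = {
                .fd    = efd,   /* must match the assigned eventfd */
                .gsi   = gsi,   /* and the assigned GSI */
                .flags = KVM_IRQFD_FLAG_DEASSIGN,
        };

        return ioctl(vm_fd, KVM_IRQFD, &irqfd);
}

kvm_irqfd_deassign() matches entries on both the eventfd and the GSI (the loop at line 538), so the pair must be exactly what was assigned.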
760 ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p) in ioeventfd_check_collision() argument
764 list_for_each_entry(_p, &kvm->ioeventfds, list) in ioeventfd_check_collision()
785 static int kvm_assign_ioeventfd_idx(struct kvm *kvm, in kvm_assign_ioeventfd_idx() argument
816 mutex_lock(&kvm->slots_lock); in kvm_assign_ioeventfd_idx()
819 if (ioeventfd_check_collision(kvm, p)) { in kvm_assign_ioeventfd_idx()
826 ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length, in kvm_assign_ioeventfd_idx()
831 kvm->buses[bus_idx]->ioeventfd_count++; in kvm_assign_ioeventfd_idx()
832 list_add_tail(&p->list, &kvm->ioeventfds); in kvm_assign_ioeventfd_idx()
834 mutex_unlock(&kvm->slots_lock); in kvm_assign_ioeventfd_idx()
839 mutex_unlock(&kvm->slots_lock); in kvm_assign_ioeventfd_idx()
849 kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx, in kvm_deassign_ioeventfd_idx() argument
860 mutex_lock(&kvm->slots_lock); in kvm_deassign_ioeventfd_idx()
862 list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) { in kvm_deassign_ioeventfd_idx()
875 kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); in kvm_deassign_ioeventfd_idx()
876 if (kvm->buses[bus_idx]) in kvm_deassign_ioeventfd_idx()
877 kvm->buses[bus_idx]->ioeventfd_count--; in kvm_deassign_ioeventfd_idx()
883 mutex_unlock(&kvm->slots_lock); in kvm_deassign_ioeventfd_idx()
890 static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) in kvm_deassign_ioeventfd() argument
893 int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args); in kvm_deassign_ioeventfd()
896 kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args); in kvm_deassign_ioeventfd()
902 kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) in kvm_assign_ioeventfd() argument
932 ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args); in kvm_assign_ioeventfd()
940 ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args); in kvm_assign_ioeventfd()
948 kvm_deassign_ioeventfd_idx(kvm, bus_idx, args); in kvm_assign_ioeventfd()
954 kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) in kvm_ioeventfd() argument
957 return kvm_deassign_ioeventfd(kvm, args); in kvm_ioeventfd()
959 return kvm_assign_ioeventfd(kvm, args); in kvm_ioeventfd()
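
kvm_ioeventfd() (line 954) is the analogous assign/deassign dispatch for KVM_IOEVENTFD: once registered, a guest write to the given MMIO or PIO address signals the eventfd in the kernel instead of exiting to userspace. A hedged userspace sketch with the same includes as the attach example; attach_ioeventfd is an illustrative name:

static int attach_ioeventfd(int vm_fd, int efd, __u64 addr)
{
        struct kvm_ioeventfd ioeventfd = {
                .addr = addr,   /* guest-physical doorbell address */
                .len  = 4,      /* trap 4-byte writes */
                .fd   = efd,    /* eventfd signaled on each write */
        };

        return ioctl(vm_fd, KVM_IOEVENTFD, &ioeventfd);
}

KVM_IOEVENTFD_FLAG_PIO selects the port-I/O bus, KVM_IOEVENTFD_FLAG_DATAMATCH restricts signaling to writes of .datamatch, and a zero .len additionally registers on KVM_FAST_MMIO_BUS, as the kvm_assign_ioeventfd() hits at lines 932-948 show.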