Lines Matching refs:desc

Every line below references the struct irq_desc *desc interrupt descriptor; the function names shown (__setup_irq(), request_threaded_irq(), free_nmi(), ...) identify this as the genirq management code, kernel/irq/manage.c. The left-hand numbers are source line numbers in that file, each entry ends with the enclosing function, and the trailing "argument"/"local" marks whether desc is a parameter or a local variable there.

38 static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)  in __synchronize_hardirq()  argument
40 struct irq_data *irqd = irq_desc_get_irq_data(desc); in __synchronize_hardirq()
50 while (irqd_irq_inprogress(&desc->irq_data)) in __synchronize_hardirq()
54 raw_spin_lock_irqsave(&desc->lock, flags); in __synchronize_hardirq()
55 inprogress = irqd_irq_inprogress(&desc->irq_data); in __synchronize_hardirq()
70 raw_spin_unlock_irqrestore(&desc->lock, flags); in __synchronize_hardirq()
100 struct irq_desc *desc = irq_to_desc(irq); in synchronize_hardirq() local
102 if (desc) { in synchronize_hardirq()
103 __synchronize_hardirq(desc, false); in synchronize_hardirq()
104 return !atomic_read(&desc->threads_active); in synchronize_hardirq()
128 struct irq_desc *desc = irq_to_desc(irq); in synchronize_irq() local
130 if (desc) { in synchronize_irq()
131 __synchronize_hardirq(desc, true); in synchronize_irq()
137 wait_event(desc->wait_for_threads, in synchronize_irq()
138 !atomic_read(&desc->threads_active)); in synchronize_irq()
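
The two entry points above differ in scope: synchronize_hardirq() only spins until the hard handler is no longer in progress and returns whether the threaded handlers have also finished (!threads_active), while synchronize_irq() additionally sleeps on desc->wait_for_threads until threads_active reaches zero. A minimal driver-side sketch of the usual quiesce pattern, assuming a hypothetical struct my_dev holding the IRQ number:

    /* Sketch: quiescing an interrupt before tearing down driver state. */
    #include <linux/interrupt.h>

    struct my_dev { unsigned int irq; };    /* hypothetical driver state */

    static void my_dev_quiesce(struct my_dev *dev)
    {
        /* Stop new invocations without waiting (safe from atomic context). */
        disable_irq_nosync(dev->irq);

        /*
         * Wait for the hard handler *and* any threaded handler to finish.
         * Must be called from sleepable context, and never while holding a
         * lock the handler itself takes, or it deadlocks.
         */
        synchronize_irq(dev->irq);
    }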
146 static bool __irq_can_set_affinity(struct irq_desc *desc) in __irq_can_set_affinity() argument
148 if (!desc || !irqd_can_balance(&desc->irq_data) || in __irq_can_set_affinity()
149 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) in __irq_can_set_affinity()
173 struct irq_desc *desc = irq_to_desc(irq); in irq_can_set_affinity_usr() local
175 return __irq_can_set_affinity(desc) && in irq_can_set_affinity_usr()
176 !irqd_affinity_is_managed(&desc->irq_data); in irq_can_set_affinity_usr()
188 void irq_set_thread_affinity(struct irq_desc *desc) in irq_set_thread_affinity() argument
192 for_each_action_of_desc(desc, action) in irq_set_thread_affinity()
213 struct irq_desc *desc = irq_data_to_desc(data); in irq_do_set_affinity() local
224 cpumask_copy(desc->irq_common_data.affinity, mask); in irq_do_set_affinity()
228 irq_set_thread_affinity(desc); in irq_do_set_affinity()
239 struct irq_desc *desc = irq_data_to_desc(data); in irq_set_affinity_pending() local
242 irq_copy_pending(desc, dest); in irq_set_affinity_pending()
272 struct irq_desc *desc = irq_data_to_desc(data); in irq_set_affinity_locked() local
282 irq_copy_pending(desc, mask); in irq_set_affinity_locked()
285 if (desc->affinity_notify) { in irq_set_affinity_locked()
286 kref_get(&desc->affinity_notify->kref); in irq_set_affinity_locked()
287 schedule_work(&desc->affinity_notify->work); in irq_set_affinity_locked()
296 struct irq_desc *desc = irq_to_desc(irq); in __irq_set_affinity() local
300 if (!desc) in __irq_set_affinity()
303 raw_spin_lock_irqsave(&desc->lock, flags); in __irq_set_affinity()
304 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force); in __irq_set_affinity()
305 raw_spin_unlock_irqrestore(&desc->lock, flags); in __irq_set_affinity()
312 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in irq_set_affinity_hint() local
314 if (!desc) in irq_set_affinity_hint()
316 desc->affinity_hint = m; in irq_set_affinity_hint()
317 irq_put_desc_unlock(desc, flags); in irq_set_affinity_hint()
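
These internals are reached from drivers through irq_set_affinity(), which takes desc->lock and funnels into irq_set_affinity_locked() (queueing the move via irq_copy_pending() when it cannot be applied immediately), and through irq_set_affinity_hint(), which merely records an advisory mask. A hedged sketch spreading one vector per online CPU, where the irqs[]/nvec layout is an assumption:

    /* Sketch: steering per-queue vectors and publishing affinity hints. */
    #include <linux/cpumask.h>
    #include <linux/interrupt.h>

    static int my_dev_spread_irqs(unsigned int *irqs, unsigned int nvec)
    {
        unsigned int i, cpu = cpumask_first(cpu_online_mask);

        for (i = 0; i < nvec; i++) {
            /* May fail, e.g. for managed or non-balanceable interrupts. */
            int ret = irq_set_affinity(irqs[i], cpumask_of(cpu));

            if (ret)
                return ret;

            /* Advisory only; shows up as /proc/irq/<n>/affinity_hint. */
            irq_set_affinity_hint(irqs[i], cpumask_of(cpu));

            cpu = cpumask_next(cpu, cpu_online_mask);
            if (cpu >= nr_cpu_ids)
                cpu = cpumask_first(cpu_online_mask);
        }
        return 0;
    }

The hint must be reset with irq_set_affinity_hint(irq, NULL) before the line is freed; __free_irq() further down WARNs if one is left behind.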
329 struct irq_desc *desc = irq_to_desc(notify->irq); in irq_affinity_notify() local
333 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) in irq_affinity_notify()
336 raw_spin_lock_irqsave(&desc->lock, flags); in irq_affinity_notify()
337 if (irq_move_pending(&desc->irq_data)) in irq_affinity_notify()
338 irq_get_pending(cpumask, desc); in irq_affinity_notify()
340 cpumask_copy(cpumask, desc->irq_common_data.affinity); in irq_affinity_notify()
341 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_affinity_notify()
364 struct irq_desc *desc = irq_to_desc(irq); in irq_set_affinity_notifier() local
371 if (!desc || desc->istate & IRQS_NMI) in irq_set_affinity_notifier()
381 raw_spin_lock_irqsave(&desc->lock, flags); in irq_set_affinity_notifier()
382 old_notify = desc->affinity_notify; in irq_set_affinity_notifier()
383 desc->affinity_notify = notify; in irq_set_affinity_notifier()
384 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_set_affinity_notifier()
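
irq_set_affinity_notifier() installs the notifier under desc->lock (refusing NMIs) and releases the previous one through its kref; irq_affinity_notify() then runs from a workqueue, snapshotting the pending or current mask before invoking the callback. A sketch of registering one, where both callbacks and my_dev_watch_affinity() are hypothetical:

    /* Sketch: getting called back whenever the IRQ's affinity changes. */
    #include <linux/interrupt.h>
    #include <linux/slab.h>

    static void my_affinity_changed(struct irq_affinity_notify *notify,
                                    const cpumask_t *mask)
    {
        /* Workqueue context: rebalance per-CPU resources for the new mask. */
    }

    static void my_affinity_release(struct kref *ref)
    {
        kfree(container_of(ref, struct irq_affinity_notify, kref));
    }

    static int my_dev_watch_affinity(unsigned int irq)
    {
        struct irq_affinity_notify *notify = kzalloc(sizeof(*notify), GFP_KERNEL);

        if (!notify)
            return -ENOMEM;

        notify->notify = my_affinity_changed;
        notify->release = my_affinity_release;
        /* kref and work are initialized by irq_set_affinity_notifier(). */
        return irq_set_affinity_notifier(irq, notify);
    }

Unregister with irq_set_affinity_notifier(irq, NULL) before free_irq(), which WARNs on a still-installed notifier.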
399 int irq_setup_affinity(struct irq_desc *desc) in irq_setup_affinity() argument
402 int ret, node = irq_desc_get_node(desc); in irq_setup_affinity()
407 if (!__irq_can_set_affinity(desc)) in irq_setup_affinity()
415 if (irqd_affinity_is_managed(&desc->irq_data) || in irq_setup_affinity()
416 irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { in irq_setup_affinity()
417 if (cpumask_intersects(desc->irq_common_data.affinity, in irq_setup_affinity()
419 set = desc->irq_common_data.affinity; in irq_setup_affinity()
421 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); in irq_setup_affinity()
435 ret = irq_do_set_affinity(&desc->irq_data, &mask, false); in irq_setup_affinity()
441 int irq_setup_affinity(struct irq_desc *desc) in irq_setup_affinity() argument
443 return irq_select_affinity(irq_desc_get_irq(desc)); in irq_setup_affinity()
452 struct irq_desc *desc = irq_to_desc(irq); in irq_select_affinity_usr() local
456 raw_spin_lock_irqsave(&desc->lock, flags); in irq_select_affinity_usr()
457 ret = irq_setup_affinity(desc); in irq_select_affinity_usr()
458 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_select_affinity_usr()
477 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in irq_set_vcpu_affinity() local
482 if (!desc) in irq_set_vcpu_affinity()
485 data = irq_desc_get_irq_data(desc); in irq_set_vcpu_affinity()
499 irq_put_desc_unlock(desc, flags); in irq_set_vcpu_affinity()
505 void __disable_irq(struct irq_desc *desc) in __disable_irq() argument
507 if (!desc->depth++) in __disable_irq()
508 irq_disable(desc); in __disable_irq()
514 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in __disable_irq_nosync() local
516 if (!desc) in __disable_irq_nosync()
518 __disable_irq(desc); in __disable_irq_nosync()
519 irq_put_desc_busunlock(desc, flags); in __disable_irq_nosync()
600 void __enable_irq(struct irq_desc *desc) in __enable_irq() argument
602 switch (desc->depth) { in __enable_irq()
606 irq_desc_get_irq(desc)); in __enable_irq()
609 if (desc->istate & IRQS_SUSPENDED) in __enable_irq()
612 irq_settings_set_noprobe(desc); in __enable_irq()
620 irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE); in __enable_irq()
624 desc->depth--; in __enable_irq()
642 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in enable_irq() local
644 if (!desc) in enable_irq()
646 if (WARN(!desc->irq_data.chip, in enable_irq()
650 __enable_irq(desc); in enable_irq()
652 irq_put_desc_busunlock(desc, flags); in enable_irq()
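
__disable_irq() and __enable_irq() keep a nesting count in desc->depth: only the 0->1 transition actually disables, and only 1->0 re-starts the line, with IRQ_RESEND replaying an edge that arrived while disabled; an unbalanced enable_irq() trips the WARN in __enable_irq(). Calls must therefore pair exactly, as in this sketch (struct my_dev is hypothetical):

    /* Sketch: disable_irq()/enable_irq() nest via desc->depth. */
    #include <linux/interrupt.h>

    struct my_dev { unsigned int irq; };   /* hypothetical driver state */

    static void my_dev_reprogram(struct my_dev *dev)
    {
        disable_irq(dev->irq);   /* depth 0 -> 1: masks and waits for handlers */
        disable_irq(dev->irq);   /* depth 1 -> 2: pure bookkeeping */

        /* ... reprogram the device without racing its handler ... */

        enable_irq(dev->irq);    /* depth 2 -> 1: still disabled */
        enable_irq(dev->irq);    /* depth 1 -> 0: irq_startup() with IRQ_RESEND */
    }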
672 struct irq_desc *desc = irq_to_desc(irq); in set_irq_wake_real() local
675 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE) in set_irq_wake_real()
678 if (desc->irq_data.chip->irq_set_wake) in set_irq_wake_real()
679 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); in set_irq_wake_real()
699 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in irq_set_irq_wake() local
702 if (!desc) in irq_set_irq_wake()
706 if (desc->istate & IRQS_NMI) { in irq_set_irq_wake()
715 if (desc->wake_depth++ == 0) { in irq_set_irq_wake()
718 desc->wake_depth = 0; in irq_set_irq_wake()
720 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); in irq_set_irq_wake()
723 if (desc->wake_depth == 0) { in irq_set_irq_wake()
725 } else if (--desc->wake_depth == 0) { in irq_set_irq_wake()
728 desc->wake_depth = 1; in irq_set_irq_wake()
730 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); in irq_set_irq_wake()
735 irq_put_desc_busunlock(desc, flags); in irq_set_irq_wake()
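
irq_set_irq_wake() is reference counted the same way through desc->wake_depth, rolling the count back when the irqchip rejects the request, and refusing NMIs outright. Drivers usually call it through the enable_irq_wake()/disable_irq_wake() wrappers from their PM callbacks; a sketch, assuming a wakeup-capable device and hypothetical driver data:

    /* Sketch: arming an IRQ as a system wakeup source across suspend. */
    #include <linux/device.h>
    #include <linux/interrupt.h>
    #include <linux/pm_wakeup.h>

    struct my_dev { unsigned int irq; };   /* hypothetical driver state */

    static int my_dev_suspend(struct device *dev)
    {
        struct my_dev *md = dev_get_drvdata(dev);

        if (device_may_wakeup(dev))
            enable_irq_wake(md->irq);    /* wake_depth 0 -> 1 */
        return 0;
    }

    static int my_dev_resume(struct device *dev)
    {
        struct my_dev *md = dev_get_drvdata(dev);

        if (device_may_wakeup(dev))
            disable_irq_wake(md->irq);   /* wake_depth 1 -> 0 */
        return 0;
    }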
748 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in can_request_irq() local
751 if (!desc) in can_request_irq()
754 if (irq_settings_can_request(desc)) { in can_request_irq()
755 if (!desc->action || in can_request_irq()
756 irqflags & desc->action->flags & IRQF_SHARED) in can_request_irq()
759 irq_put_desc_unlock(desc, flags); in can_request_irq()
763 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags) in __irq_set_trigger() argument
765 struct irq_chip *chip = desc->irq_data.chip; in __irq_set_trigger()
774 irq_desc_get_irq(desc), in __irq_set_trigger()
780 if (!irqd_irq_masked(&desc->irq_data)) in __irq_set_trigger()
781 mask_irq(desc); in __irq_set_trigger()
782 if (!irqd_irq_disabled(&desc->irq_data)) in __irq_set_trigger()
788 ret = chip->irq_set_type(&desc->irq_data, flags); in __irq_set_trigger()
793 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); in __irq_set_trigger()
794 irqd_set(&desc->irq_data, flags); in __irq_set_trigger()
798 flags = irqd_get_trigger_type(&desc->irq_data); in __irq_set_trigger()
799 irq_settings_set_trigger_mask(desc, flags); in __irq_set_trigger()
800 irqd_clear(&desc->irq_data, IRQD_LEVEL); in __irq_set_trigger()
801 irq_settings_clr_level(desc); in __irq_set_trigger()
803 irq_settings_set_level(desc); in __irq_set_trigger()
804 irqd_set(&desc->irq_data, IRQD_LEVEL); in __irq_set_trigger()
811 flags, irq_desc_get_irq(desc), chip->irq_set_type); in __irq_set_trigger()
814 unmask_irq(desc); in __irq_set_trigger()
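
__irq_set_trigger() masks an unmasked line first when the chip demands it (IRQCHIP_SET_TYPE_MASKED), calls chip->irq_set_type(), and on success mirrors the resulting type into both the irqd flags and the descriptor settings, keeping the level/edge state (IRQD_LEVEL) consistent before unmasking again. Drivers reach it via the IRQF_TRIGGER_* request flags or irq_set_irq_type(); a sketch with a hypothetical handler:

    /* Sketch: selecting the trigger type when requesting the line. */
    #include <linux/interrupt.h>
    #include <linux/irq.h>

    static irqreturn_t my_dev_hardirq(int irq, void *data);   /* hypothetical */

    static int my_dev_setup_irq(unsigned int irq, void *dev)
    {
        /*
         * The IRQF_TRIGGER_RISING bit reaches __irq_set_trigger() through
         * __setup_irq(); irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING) is the
         * standalone way to request the same configuration.
         */
        return request_irq(irq, my_dev_hardirq, IRQF_TRIGGER_RISING,
                           "my_dev", dev);
    }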
822 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in irq_set_parent() local
824 if (!desc) in irq_set_parent()
827 desc->parent_irq = parent_irq; in irq_set_parent()
829 irq_put_desc_unlock(desc, flags); in irq_set_parent()
891 static void irq_finalize_oneshot(struct irq_desc *desc, in irq_finalize_oneshot() argument
894 if (!(desc->istate & IRQS_ONESHOT) || in irq_finalize_oneshot()
898 chip_bus_lock(desc); in irq_finalize_oneshot()
899 raw_spin_lock_irq(&desc->lock); in irq_finalize_oneshot()
915 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { in irq_finalize_oneshot()
916 raw_spin_unlock_irq(&desc->lock); in irq_finalize_oneshot()
917 chip_bus_sync_unlock(desc); in irq_finalize_oneshot()
930 desc->threads_oneshot &= ~action->thread_mask; in irq_finalize_oneshot()
932 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && in irq_finalize_oneshot()
933 irqd_irq_masked(&desc->irq_data)) in irq_finalize_oneshot()
934 unmask_threaded_irq(desc); in irq_finalize_oneshot()
937 raw_spin_unlock_irq(&desc->lock); in irq_finalize_oneshot()
938 chip_bus_sync_unlock(desc); in irq_finalize_oneshot()
946 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) in irq_thread_check_affinity() argument
963 raw_spin_lock_irq(&desc->lock); in irq_thread_check_affinity()
968 if (cpumask_available(desc->irq_common_data.affinity)) { in irq_thread_check_affinity()
971 m = irq_data_get_effective_affinity_mask(&desc->irq_data); in irq_thread_check_affinity()
976 raw_spin_unlock_irq(&desc->lock); in irq_thread_check_affinity()
984 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } in irq_thread_check_affinity() argument
994 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) in irq_forced_thread_fn() argument
1001 atomic_inc(&desc->threads_handled); in irq_forced_thread_fn()
1003 irq_finalize_oneshot(desc, action); in irq_forced_thread_fn()
1013 static irqreturn_t irq_thread_fn(struct irq_desc *desc, in irq_thread_fn() argument
1020 atomic_inc(&desc->threads_handled); in irq_thread_fn()
1022 irq_finalize_oneshot(desc, action); in irq_thread_fn()
1026 static void wake_threads_waitq(struct irq_desc *desc) in wake_threads_waitq() argument
1028 if (atomic_dec_and_test(&desc->threads_active)) in wake_threads_waitq()
1029 wake_up(&desc->wait_for_threads); in wake_threads_waitq()
1035 struct irq_desc *desc; in irq_thread_dtor() local
1047 desc = irq_to_desc(action->irq); in irq_thread_dtor()
1053 wake_threads_waitq(desc); in irq_thread_dtor()
1056 irq_finalize_oneshot(desc, action); in irq_thread_dtor()
1059 static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action) in irq_wake_secondary() argument
1066 raw_spin_lock_irq(&desc->lock); in irq_wake_secondary()
1067 __irq_wake_thread(desc, secondary); in irq_wake_secondary()
1068 raw_spin_unlock_irq(&desc->lock); in irq_wake_secondary()
1078 struct irq_desc *desc = irq_to_desc(action->irq); in irq_thread() local
1079 irqreturn_t (*handler_fn)(struct irq_desc *desc, in irq_thread()
1091 irq_thread_check_affinity(desc, action); in irq_thread()
1096 irq_thread_check_affinity(desc, action); in irq_thread()
1098 action_ret = handler_fn(desc, action); in irq_thread()
1100 irq_wake_secondary(desc, action); in irq_thread()
1102 wake_threads_waitq(desc); in irq_thread()
1123 struct irq_desc *desc = irq_to_desc(irq); in irq_wake_thread() local
1127 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) in irq_wake_thread()
1130 raw_spin_lock_irqsave(&desc->lock, flags); in irq_wake_thread()
1131 for_each_action_of_desc(desc, action) { in irq_wake_thread()
1134 __irq_wake_thread(desc, action); in irq_wake_thread()
1138 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_wake_thread()
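
The thread machinery above pairs a hard handler returning IRQ_WAKE_THREAD with irq_thread(): the thread function runs, threads_handled/threads_active are accounted, and for oneshot lines irq_finalize_oneshot() unmasks only once every sharer's bit has cleared from threads_oneshot. A sketch of the canonical split handler, where struct my_dev and the my_dev_* helpers are hypothetical:

    /* Sketch: hard half acks the device, threaded half does the slow work. */
    #include <linux/interrupt.h>

    struct my_dev;                                   /* hypothetical driver state */
    bool my_dev_irq_pending(struct my_dev *dev);     /* hypothetical helpers */
    void my_dev_ack(struct my_dev *dev);
    void my_dev_process_events(struct my_dev *dev);

    static irqreturn_t my_dev_hardirq(int irq, void *data)
    {
        struct my_dev *dev = data;

        if (!my_dev_irq_pending(dev))   /* hypothetical register check */
            return IRQ_NONE;

        my_dev_ack(dev);                /* quiet the line quickly */
        return IRQ_WAKE_THREAD;         /* __irq_wake_thread() -> irq_thread() */
    }

    static irqreturn_t my_dev_thread(int irq, void *data)
    {
        /* Runs in the irq/<nr>-<name> kthread; sleeping is allowed here. */
        my_dev_process_events(data);
        return IRQ_HANDLED;             /* irq_finalize_oneshot() may unmask */
    }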
1181 static int irq_request_resources(struct irq_desc *desc) in irq_request_resources() argument
1183 struct irq_data *d = &desc->irq_data; in irq_request_resources()
1189 static void irq_release_resources(struct irq_desc *desc) in irq_release_resources() argument
1191 struct irq_data *d = &desc->irq_data; in irq_release_resources()
1198 static bool irq_supports_nmi(struct irq_desc *desc) in irq_supports_nmi() argument
1200 struct irq_data *d = irq_desc_get_irq_data(desc); in irq_supports_nmi()
1214 static int irq_nmi_setup(struct irq_desc *desc) in irq_nmi_setup() argument
1216 struct irq_data *d = irq_desc_get_irq_data(desc); in irq_nmi_setup()
1222 static void irq_nmi_teardown(struct irq_desc *desc) in irq_nmi_teardown() argument
1224 struct irq_data *d = irq_desc_get_irq_data(desc); in irq_nmi_teardown()
1287 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) in __setup_irq() argument
1293 if (!desc) in __setup_irq()
1296 if (desc->irq_data.chip == &no_irq_chip) in __setup_irq()
1298 if (!try_module_get(desc->owner)) in __setup_irq()
1308 new->flags |= irqd_get_trigger_type(&desc->irq_data); in __setup_irq()
1314 nested = irq_settings_is_nested_thread(desc); in __setup_irq()
1327 if (irq_settings_can_thread(desc)) { in __setup_irq()
1359 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE) in __setup_irq()
1369 mutex_lock(&desc->request_mutex); in __setup_irq()
1376 chip_bus_lock(desc); in __setup_irq()
1379 if (!desc->action) { in __setup_irq()
1380 ret = irq_request_resources(desc); in __setup_irq()
1383 new->name, irq, desc->irq_data.chip->name); in __setup_irq()
1394 raw_spin_lock_irqsave(&desc->lock, flags); in __setup_irq()
1395 old_ptr = &desc->action; in __setup_irq()
1408 if (desc->istate & IRQS_NMI) { in __setup_irq()
1410 new->name, irq, desc->irq_data.chip->name); in __setup_irq()
1419 if (irqd_trigger_type_was_set(&desc->irq_data)) { in __setup_irq()
1420 oldtype = irqd_get_trigger_type(&desc->irq_data); in __setup_irq()
1423 irqd_set_trigger_type(&desc->irq_data, oldtype); in __setup_irq()
1487 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) { in __setup_irq()
1510 init_waitqueue_head(&desc->wait_for_threads); in __setup_irq()
1514 ret = __irq_set_trigger(desc, in __setup_irq()
1532 ret = irq_activate(desc); in __setup_irq()
1536 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ in __setup_irq()
1538 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); in __setup_irq()
1541 irqd_set(&desc->irq_data, IRQD_PER_CPU); in __setup_irq()
1542 irq_settings_set_per_cpu(desc); in __setup_irq()
1546 desc->istate |= IRQS_ONESHOT; in __setup_irq()
1550 irq_settings_set_no_balancing(desc); in __setup_irq()
1551 irqd_set(&desc->irq_data, IRQD_NO_BALANCING); in __setup_irq()
1554 if (irq_settings_can_autoenable(desc)) { in __setup_irq()
1555 irq_startup(desc, IRQ_RESEND, IRQ_START_COND); in __setup_irq()
1565 desc->depth = 1; in __setup_irq()
1570 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data); in __setup_irq()
1580 irq_pm_install_action(desc, new); in __setup_irq()
1583 desc->irq_count = 0; in __setup_irq()
1584 desc->irqs_unhandled = 0; in __setup_irq()
1590 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { in __setup_irq()
1591 desc->istate &= ~IRQS_SPURIOUS_DISABLED; in __setup_irq()
1592 __enable_irq(desc); in __setup_irq()
1595 raw_spin_unlock_irqrestore(&desc->lock, flags); in __setup_irq()
1596 chip_bus_sync_unlock(desc); in __setup_irq()
1597 mutex_unlock(&desc->request_mutex); in __setup_irq()
1599 irq_setup_timings(desc, new); in __setup_irq()
1610 register_irq_proc(irq, desc); in __setup_irq()
1626 raw_spin_unlock_irqrestore(&desc->lock, flags); in __setup_irq()
1628 if (!desc->action) in __setup_irq()
1629 irq_release_resources(desc); in __setup_irq()
1631 chip_bus_sync_unlock(desc); in __setup_irq()
1632 mutex_unlock(&desc->request_mutex); in __setup_irq()
1650 module_put(desc->owner); in __setup_irq()
1664 struct irq_desc *desc = irq_to_desc(irq); in setup_irq() local
1666 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) in setup_irq()
1669 retval = irq_chip_pm_get(&desc->irq_data); in setup_irq()
1673 retval = __setup_irq(irq, desc, act); in setup_irq()
1676 irq_chip_pm_put(&desc->irq_data); in setup_irq()
1686 static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id) in __free_irq() argument
1688 unsigned irq = desc->irq_data.irq; in __free_irq()
1694 mutex_lock(&desc->request_mutex); in __free_irq()
1695 chip_bus_lock(desc); in __free_irq()
1696 raw_spin_lock_irqsave(&desc->lock, flags); in __free_irq()
1702 action_ptr = &desc->action; in __free_irq()
1708 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_irq()
1709 chip_bus_sync_unlock(desc); in __free_irq()
1710 mutex_unlock(&desc->request_mutex); in __free_irq()
1722 irq_pm_remove_action(desc, action); in __free_irq()
1725 if (!desc->action) { in __free_irq()
1726 irq_settings_clr_disable_unlazy(desc); in __free_irq()
1728 irq_shutdown(desc); in __free_irq()
1733 if (WARN_ON_ONCE(desc->affinity_hint)) in __free_irq()
1734 desc->affinity_hint = NULL; in __free_irq()
1737 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_irq()
1752 chip_bus_sync_unlock(desc); in __free_irq()
1761 __synchronize_hardirq(desc, true); in __free_irq()
1795 if (!desc->action) { in __free_irq()
1800 chip_bus_lock(desc); in __free_irq()
1805 raw_spin_lock_irqsave(&desc->lock, flags); in __free_irq()
1806 irq_domain_deactivate_irq(&desc->irq_data); in __free_irq()
1807 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_irq()
1809 irq_release_resources(desc); in __free_irq()
1810 chip_bus_sync_unlock(desc); in __free_irq()
1811 irq_remove_timings(desc); in __free_irq()
1814 mutex_unlock(&desc->request_mutex); in __free_irq()
1816 irq_chip_pm_put(&desc->irq_data); in __free_irq()
1817 module_put(desc->owner); in __free_irq()
1831 struct irq_desc *desc = irq_to_desc(irq); in remove_irq() local
1833 if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc))) in remove_irq()
1834 __free_irq(desc, act->dev_id); in remove_irq()
1856 struct irq_desc *desc = irq_to_desc(irq); in free_irq() local
1860 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) in free_irq()
1864 if (WARN_ON(desc->affinity_notify)) in free_irq()
1865 desc->affinity_notify = NULL; in free_irq()
1868 action = __free_irq(desc, dev_id); in free_irq()
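
__free_irq() also dictates the teardown order: the action is unlinked under desc->lock, but the hard handler is synchronized (__synchronize_hardirq(desc, true)) and the handler thread stopped only after the lock is dropped, and a leftover affinity_hint triggers the WARN_ON above (free_irq() likewise WARNs on a live affinity_notify). So a driver clears both before freeing; a sketch reusing the hypothetical struct my_dev:

    /* Sketch: undo optional affinity state, then free the interrupt. */
    #include <linux/interrupt.h>

    struct my_dev { unsigned int irq; };   /* hypothetical driver state */

    static void my_dev_teardown_irq(struct my_dev *dev)
    {
        irq_set_affinity_hint(dev->irq, NULL);      /* avoid WARN in __free_irq() */
        irq_set_affinity_notifier(dev->irq, NULL);  /* avoid WARN in free_irq() */
        free_irq(dev->irq, dev);   /* returns only after handler and thread exit */
    }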
1880 static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc) in __cleanup_nmi() argument
1884 desc->istate &= ~IRQS_NMI; in __cleanup_nmi()
1886 if (!WARN_ON(desc->action == NULL)) { in __cleanup_nmi()
1887 irq_pm_remove_action(desc, desc->action); in __cleanup_nmi()
1888 devname = desc->action->name; in __cleanup_nmi()
1889 unregister_handler_proc(irq, desc->action); in __cleanup_nmi()
1891 kfree(desc->action); in __cleanup_nmi()
1892 desc->action = NULL; in __cleanup_nmi()
1895 irq_settings_clr_disable_unlazy(desc); in __cleanup_nmi()
1896 irq_shutdown_and_deactivate(desc); in __cleanup_nmi()
1898 irq_release_resources(desc); in __cleanup_nmi()
1900 irq_chip_pm_put(&desc->irq_data); in __cleanup_nmi()
1901 module_put(desc->owner); in __cleanup_nmi()
1908 struct irq_desc *desc = irq_to_desc(irq); in free_nmi() local
1912 if (!desc || WARN_ON(!(desc->istate & IRQS_NMI))) in free_nmi()
1915 if (WARN_ON(irq_settings_is_per_cpu_devid(desc))) in free_nmi()
1919 if (WARN_ON(desc->depth == 0)) in free_nmi()
1922 raw_spin_lock_irqsave(&desc->lock, flags); in free_nmi()
1924 irq_nmi_teardown(desc); in free_nmi()
1925 devname = __cleanup_nmi(irq, desc); in free_nmi()
1927 raw_spin_unlock_irqrestore(&desc->lock, flags); in free_nmi()
1979 struct irq_desc *desc; in request_threaded_irq() local
1999 desc = irq_to_desc(irq); in request_threaded_irq()
2000 if (!desc) in request_threaded_irq()
2003 if (!irq_settings_can_request(desc) || in request_threaded_irq()
2004 WARN_ON(irq_settings_is_per_cpu_devid(desc))) in request_threaded_irq()
2023 retval = irq_chip_pm_get(&desc->irq_data); in request_threaded_irq()
2029 retval = __setup_irq(irq, desc, action); in request_threaded_irq()
2032 irq_chip_pm_put(&desc->irq_data); in request_threaded_irq()
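
request_threaded_irq() is the public entry point: it allocates the irqaction, takes a runtime-PM reference on the irqchip (irq_chip_pm_get()) and hands the rest to __setup_irq(). Continuing the split-handler sketch from above:

    /* Sketch: requesting the hard/threaded handler pair defined earlier. */
    #include <linux/interrupt.h>

    struct my_dev { unsigned int irq; };   /* hypothetical driver state */

    static irqreturn_t my_dev_hardirq(int irq, void *data);   /* from the sketch above */
    static irqreturn_t my_dev_thread(int irq, void *data);

    static int my_dev_request_irq(struct my_dev *dev)
    {
        /*
         * IRQF_ONESHOT keeps the line masked until my_dev_thread() returns;
         * it is mandatory when the primary handler is NULL and the chip is
         * not IRQCHIP_ONESHOT_SAFE (see the check in __setup_irq()).
         */
        return request_threaded_irq(dev->irq, my_dev_hardirq, my_dev_thread,
                                    IRQF_ONESHOT, "my_dev", dev);
    }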
2080 struct irq_desc *desc; in request_any_context_irq() local
2086 desc = irq_to_desc(irq); in request_any_context_irq()
2087 if (!desc) in request_any_context_irq()
2090 if (irq_settings_is_nested_thread(desc)) { in request_any_context_irq()
2131 struct irq_desc *desc; in request_nmi() local
2148 desc = irq_to_desc(irq); in request_nmi()
2150 if (!desc || irq_settings_can_autoenable(desc) || in request_nmi()
2151 !irq_settings_can_request(desc) || in request_nmi()
2152 WARN_ON(irq_settings_is_per_cpu_devid(desc)) || in request_nmi()
2153 !irq_supports_nmi(desc)) in request_nmi()
2165 retval = irq_chip_pm_get(&desc->irq_data); in request_nmi()
2169 retval = __setup_irq(irq, desc, action); in request_nmi()
2173 raw_spin_lock_irqsave(&desc->lock, flags); in request_nmi()
2176 desc->istate |= IRQS_NMI; in request_nmi()
2177 retval = irq_nmi_setup(desc); in request_nmi()
2179 __cleanup_nmi(irq, desc); in request_nmi()
2180 raw_spin_unlock_irqrestore(&desc->lock, flags); in request_nmi()
2184 raw_spin_unlock_irqrestore(&desc->lock, flags); in request_nmi()
2189 irq_chip_pm_put(&desc->irq_data); in request_nmi()
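
request_nmi() layers extra checks on __setup_irq(): the line must not be auto-enabled, must pass irq_supports_nmi(), and on success IRQS_NMI is set under desc->lock and irq_nmi_setup() switches delivery, with __cleanup_nmi() undoing everything on failure. A hedged sketch, assuming an irqchip with NMI support and hypothetical my_dev helpers:

    /* Sketch: an NMI-style handler; normal interrupts stay disabled around it. */
    #include <linux/interrupt.h>

    struct my_dev { unsigned int irq; };        /* hypothetical driver state */
    void my_dev_snapshot_counters(void *data);  /* hypothetical helper */

    static irqreturn_t my_dev_nmi(int irq, void *data)
    {
        /* NMI context: no sleeping, no locks shared with non-NMI code. */
        my_dev_snapshot_counters(data);
        return IRQ_HANDLED;
    }

    static int my_dev_request_nmi(struct my_dev *dev)
    {
        int ret = request_nmi(dev->irq, my_dev_nmi, 0, "my_dev-nmi", dev);

        /* Falling back to a regular interrupt is the common pattern. */
        if (ret)
            ret = request_irq(dev->irq, my_dev_nmi, 0, "my_dev", dev);
        return ret;
    }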
2200 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); in enable_percpu_irq() local
2202 if (!desc) in enable_percpu_irq()
2211 type = irqd_get_trigger_type(&desc->irq_data); in enable_percpu_irq()
2216 ret = __irq_set_trigger(desc, type); in enable_percpu_irq()
2224 irq_percpu_enable(desc, cpu); in enable_percpu_irq()
2226 irq_put_desc_unlock(desc, flags); in enable_percpu_irq()
2245 struct irq_desc *desc; in irq_percpu_is_enabled() local
2249 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); in irq_percpu_is_enabled()
2250 if (!desc) in irq_percpu_is_enabled()
2253 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled); in irq_percpu_is_enabled()
2254 irq_put_desc_unlock(desc, flags); in irq_percpu_is_enabled()
2264 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); in disable_percpu_irq() local
2266 if (!desc) in disable_percpu_irq()
2269 irq_percpu_disable(desc, cpu); in disable_percpu_irq()
2270 irq_put_desc_unlock(desc, flags); in disable_percpu_irq()
2284 struct irq_desc *desc = irq_to_desc(irq); in __free_percpu_irq() local
2290 if (!desc) in __free_percpu_irq()
2293 raw_spin_lock_irqsave(&desc->lock, flags); in __free_percpu_irq()
2295 action = desc->action; in __free_percpu_irq()
2301 if (!cpumask_empty(desc->percpu_enabled)) { in __free_percpu_irq()
2303 irq, cpumask_first(desc->percpu_enabled)); in __free_percpu_irq()
2308 desc->action = NULL; in __free_percpu_irq()
2310 desc->istate &= ~IRQS_NMI; in __free_percpu_irq()
2312 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_percpu_irq()
2316 irq_chip_pm_put(&desc->irq_data); in __free_percpu_irq()
2317 module_put(desc->owner); in __free_percpu_irq()
2321 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_percpu_irq()
2334 struct irq_desc *desc = irq_to_desc(irq); in remove_percpu_irq() local
2336 if (desc && irq_settings_is_per_cpu_devid(desc)) in remove_percpu_irq()
2354 struct irq_desc *desc = irq_to_desc(irq); in free_percpu_irq() local
2356 if (!desc || !irq_settings_is_per_cpu_devid(desc)) in free_percpu_irq()
2359 chip_bus_lock(desc); in free_percpu_irq()
2361 chip_bus_sync_unlock(desc); in free_percpu_irq()
2367 struct irq_desc *desc = irq_to_desc(irq); in free_percpu_nmi() local
2369 if (!desc || !irq_settings_is_per_cpu_devid(desc)) in free_percpu_nmi()
2372 if (WARN_ON(!(desc->istate & IRQS_NMI))) in free_percpu_nmi()
2387 struct irq_desc *desc = irq_to_desc(irq); in setup_percpu_irq() local
2390 if (!desc || !irq_settings_is_per_cpu_devid(desc)) in setup_percpu_irq()
2393 retval = irq_chip_pm_get(&desc->irq_data); in setup_percpu_irq()
2397 retval = __setup_irq(irq, desc, act); in setup_percpu_irq()
2400 irq_chip_pm_put(&desc->irq_data); in setup_percpu_irq()
2427 struct irq_desc *desc; in __request_percpu_irq() local
2433 desc = irq_to_desc(irq); in __request_percpu_irq()
2434 if (!desc || !irq_settings_can_request(desc) || in __request_percpu_irq()
2435 !irq_settings_is_per_cpu_devid(desc)) in __request_percpu_irq()
2450 retval = irq_chip_pm_get(&desc->irq_data); in __request_percpu_irq()
2456 retval = __setup_irq(irq, desc, action); in __request_percpu_irq()
2459 irq_chip_pm_put(&desc->irq_data); in __request_percpu_irq()
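
Per-CPU interrupts (timers and other PPI-like lines) take the parallel path: __request_percpu_irq() passes a __percpu dev_id into __setup_irq(), and each CPU then enables its own copy with enable_percpu_irq(), tracked per CPU in desc->percpu_enabled. A sketch, where struct my_pcpu_state is hypothetical:

    /* Sketch: one interrupt number, enabled separately on every CPU. */
    #include <linux/interrupt.h>
    #include <linux/irq.h>
    #include <linux/percpu.h>
    #include <linux/types.h>

    struct my_pcpu_state { u64 count; };   /* hypothetical per-CPU state */

    static DEFINE_PER_CPU(struct my_pcpu_state, my_state);

    static irqreturn_t my_pcpu_handler(int irq, void *data)
    {
        /* genirq hands us this CPU's slice of the __percpu pointer. */
        struct my_pcpu_state *st = data;

        st->count++;
        return IRQ_HANDLED;
    }

    static int my_pcpu_init(unsigned int irq)
    {
        int ret = request_percpu_irq(irq, my_pcpu_handler, "my_pcpu",
                                     &my_state);
        if (ret)
            return ret;

        /* Normally done from a CPU-hotplug "online" callback on each CPU: */
        enable_percpu_irq(irq, IRQ_TYPE_NONE);   /* keep the configured trigger */
        return 0;
    }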
2492 struct irq_desc *desc; in request_percpu_nmi() local
2499 desc = irq_to_desc(irq); in request_percpu_nmi()
2501 if (!desc || !irq_settings_can_request(desc) || in request_percpu_nmi()
2502 !irq_settings_is_per_cpu_devid(desc) || in request_percpu_nmi()
2503 irq_settings_can_autoenable(desc) || in request_percpu_nmi()
2504 !irq_supports_nmi(desc)) in request_percpu_nmi()
2508 if (desc->istate & IRQS_NMI) in request_percpu_nmi()
2521 retval = irq_chip_pm_get(&desc->irq_data); in request_percpu_nmi()
2525 retval = __setup_irq(irq, desc, action); in request_percpu_nmi()
2529 raw_spin_lock_irqsave(&desc->lock, flags); in request_percpu_nmi()
2530 desc->istate |= IRQS_NMI; in request_percpu_nmi()
2531 raw_spin_unlock_irqrestore(&desc->lock, flags); in request_percpu_nmi()
2536 irq_chip_pm_put(&desc->irq_data); in request_percpu_nmi()
2559 struct irq_desc *desc; in prepare_percpu_nmi() local
2564 desc = irq_get_desc_lock(irq, &flags, in prepare_percpu_nmi()
2566 if (!desc) in prepare_percpu_nmi()
2569 if (WARN(!(desc->istate & IRQS_NMI), in prepare_percpu_nmi()
2576 ret = irq_nmi_setup(desc); in prepare_percpu_nmi()
2583 irq_put_desc_unlock(desc, flags); in prepare_percpu_nmi()
2602 struct irq_desc *desc; in teardown_percpu_nmi() local
2606 desc = irq_get_desc_lock(irq, &flags, in teardown_percpu_nmi()
2608 if (!desc) in teardown_percpu_nmi()
2611 if (WARN_ON(!(desc->istate & IRQS_NMI))) in teardown_percpu_nmi()
2614 irq_nmi_teardown(desc); in teardown_percpu_nmi()
2616 irq_put_desc_unlock(desc, flags); in teardown_percpu_nmi()
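
The per-CPU NMI variants split bring-up further: request_percpu_nmi() registers the handler and sets IRQS_NMI once, after which each CPU runs prepare_percpu_nmi() on itself (the descriptor lookup uses IRQ_GET_DESC_CHECK_PERCPU) before enabling delivery, and teardown_percpu_nmi() reverses the per-CPU setup. A hedged sketch shaped as CPU-hotplug callbacks, with my_nmi_irq hypothetical:

    /* Sketch: per-CPU NMI bring-up/teardown, run on the CPU in question. */
    #include <linux/interrupt.h>
    #include <linux/irq.h>

    static unsigned int my_nmi_irq;   /* hypothetical, discovered at probe time */

    static int my_pcpu_nmi_starting(unsigned int cpu)
    {
        int ret = prepare_percpu_nmi(my_nmi_irq);

        if (ret)
            return ret;
        enable_percpu_nmi(my_nmi_irq, IRQ_TYPE_NONE);
        return 0;
    }

    static int my_pcpu_nmi_dying(unsigned int cpu)
    {
        disable_percpu_nmi(my_nmi_irq);
        teardown_percpu_nmi(my_nmi_irq);
        return 0;
    }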
2657 struct irq_desc *desc; in irq_get_irqchip_state() local
2662 desc = irq_get_desc_buslock(irq, &flags, 0); in irq_get_irqchip_state()
2663 if (!desc) in irq_get_irqchip_state()
2666 data = irq_desc_get_irq_data(desc); in irq_get_irqchip_state()
2670 irq_put_desc_busunlock(desc, flags); in irq_get_irqchip_state()
2690 struct irq_desc *desc; in irq_set_irqchip_state() local
2696 desc = irq_get_desc_buslock(irq, &flags, 0); in irq_set_irqchip_state()
2697 if (!desc) in irq_set_irqchip_state()
2700 data = irq_desc_get_irq_data(desc); in irq_set_irqchip_state()
2716 irq_put_desc_busunlock(desc, flags); in irq_set_irqchip_state()
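
Finally, irq_get_irqchip_state() and irq_set_irqchip_state() walk the irq_data hierarchy to a chip implementing the respective callback and query or force machine-level state such as IRQCHIP_STATE_PENDING. One plausible use, sketched here, is dropping a stale pending latch before re-enabling a line:

    /* Sketch: clear a stale pending latch, then re-enable the interrupt. */
    #include <linux/interrupt.h>
    #include <linux/irq.h>

    static void my_dev_clear_stale(unsigned int irq)
    {
        bool pending = false;

        /* Not every irqchip implements this; an error just means "unknown". */
        if (!irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending) &&
            pending)
            irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);

        enable_irq(irq);
    }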