
Lines Matching refs:desc (uses of struct irq_desc *desc in kernel/irq/manage.c)

38 static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)  in __synchronize_hardirq()  argument
40 struct irq_data *irqd = irq_desc_get_irq_data(desc); in __synchronize_hardirq()
50 while (irqd_irq_inprogress(&desc->irq_data)) in __synchronize_hardirq()
54 raw_spin_lock_irqsave(&desc->lock, flags); in __synchronize_hardirq()
55 inprogress = irqd_irq_inprogress(&desc->irq_data); in __synchronize_hardirq()
70 raw_spin_unlock_irqrestore(&desc->lock, flags); in __synchronize_hardirq()
100 struct irq_desc *desc = irq_to_desc(irq); in synchronize_hardirq() local
102 if (desc) { in synchronize_hardirq()
103 __synchronize_hardirq(desc, false); in synchronize_hardirq()
104 return !atomic_read(&desc->threads_active); in synchronize_hardirq()
128 struct irq_desc *desc = irq_to_desc(irq); in synchronize_irq() local
130 if (desc) { in synchronize_irq()
131 __synchronize_hardirq(desc, true); in synchronize_irq()
137 wait_event(desc->wait_for_threads, in synchronize_irq()
138 !atomic_read(&desc->threads_active)); in synchronize_irq()
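
The synchronize_hardirq()/synchronize_irq() matches above show a two-stage wait: __synchronize_hardirq() spins until the hard handler is no longer in progress, and synchronize_irq() additionally sleeps on wait_for_threads until threads_active drops to zero. A minimal sketch of the usual calling pattern in a device teardown path; my_dev and its fields are hypothetical:

    #include <linux/interrupt.h>
    #include <linux/io.h>
    #include <linux/slab.h>

    struct my_dev {                     /* hypothetical device state */
            void __iomem *regs;
            unsigned int irq;
            void *rx_buf;
    };

    static void my_dev_quiesce(struct my_dev *md)
    {
            /* Tell the hardware to stop raising interrupts first. */
            writel(0, md->regs + 0x10 /* hypothetical IRQ-enable register */);

            /*
             * Wait for any handler still running on another CPU,
             * including the threaded handler, to complete.
             */
            synchronize_irq(md->irq);

            /* Now nothing can touch rx_buf concurrently. */
            kfree(md->rx_buf);
    }
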
146 static bool __irq_can_set_affinity(struct irq_desc *desc) in __irq_can_set_affinity() argument
148 if (!desc || !irqd_can_balance(&desc->irq_data) || in __irq_can_set_affinity()
149 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) in __irq_can_set_affinity()
173 struct irq_desc *desc = irq_to_desc(irq); in irq_can_set_affinity_usr() local
175 return __irq_can_set_affinity(desc) && in irq_can_set_affinity_usr()
176 !irqd_affinity_is_managed(&desc->irq_data); in irq_can_set_affinity_usr()
188 void irq_set_thread_affinity(struct irq_desc *desc) in irq_set_thread_affinity() argument
192 for_each_action_of_desc(desc, action) in irq_set_thread_affinity()
215 struct irq_desc *desc = irq_data_to_desc(data); in irq_do_set_affinity() local
279 cpumask_copy(desc->irq_common_data.affinity, mask); in irq_do_set_affinity()
283 irq_set_thread_affinity(desc); in irq_do_set_affinity()
295 struct irq_desc *desc = irq_data_to_desc(data); in irq_set_affinity_pending() local
298 irq_copy_pending(desc, dest); in irq_set_affinity_pending()
327 struct irq_desc *desc = irq_data_to_desc(data); in irq_set_affinity_deactivated() local
342 cpumask_copy(desc->irq_common_data.affinity, mask); in irq_set_affinity_deactivated()
352 struct irq_desc *desc = irq_data_to_desc(data); in irq_set_affinity_locked() local
365 irq_copy_pending(desc, mask); in irq_set_affinity_locked()
368 if (desc->affinity_notify) { in irq_set_affinity_locked()
369 kref_get(&desc->affinity_notify->kref); in irq_set_affinity_locked()
370 if (!schedule_work(&desc->affinity_notify->work)) { in irq_set_affinity_locked()
372 kref_put(&desc->affinity_notify->kref, in irq_set_affinity_locked()
373 desc->affinity_notify->release); in irq_set_affinity_locked()
399 struct irq_desc *desc; in irq_update_affinity_desc() local
411 desc = irq_get_desc_buslock(irq, &flags, 0); in irq_update_affinity_desc()
412 if (!desc) in irq_update_affinity_desc()
416 if (irqd_is_started(&desc->irq_data)) { in irq_update_affinity_desc()
422 if (irqd_affinity_is_managed(&desc->irq_data)) { in irq_update_affinity_desc()
431 activated = irqd_is_activated(&desc->irq_data); in irq_update_affinity_desc()
433 irq_domain_deactivate_irq(&desc->irq_data); in irq_update_affinity_desc()
436 irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED); in irq_update_affinity_desc()
437 irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN); in irq_update_affinity_desc()
440 cpumask_copy(desc->irq_common_data.affinity, &affinity->mask); in irq_update_affinity_desc()
444 irq_domain_activate_irq(&desc->irq_data, false); in irq_update_affinity_desc()
447 irq_put_desc_busunlock(desc, flags); in irq_update_affinity_desc()
454 struct irq_desc *desc = irq_to_desc(irq); in __irq_set_affinity() local
458 if (!desc) in __irq_set_affinity()
461 raw_spin_lock_irqsave(&desc->lock, flags); in __irq_set_affinity()
462 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force); in __irq_set_affinity()
463 raw_spin_unlock_irqrestore(&desc->lock, flags); in __irq_set_affinity()
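
__irq_set_affinity() above is the locked core behind the exported irq_set_affinity() helper. A hedged sketch of steering an interrupt to a single CPU; the irq number is hypothetical:

    #include <linux/interrupt.h>
    #include <linux/cpumask.h>

    /* Pin the given interrupt line to CPU 2. */
    static int my_pin_irq(unsigned int irq)
    {
            int ret = irq_set_affinity(irq, cpumask_of(2));

            if (ret)
                    pr_warn("irq %u: failed to set affinity (%d)\n", irq, ret);
            return ret;
    }
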
501 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in __irq_apply_affinity_hint() local
503 if (!desc) in __irq_apply_affinity_hint()
505 desc->affinity_hint = m; in __irq_apply_affinity_hint()
506 irq_put_desc_unlock(desc, flags); in __irq_apply_affinity_hint()
517 struct irq_desc *desc = irq_to_desc(notify->irq); in irq_affinity_notify() local
521 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) in irq_affinity_notify()
524 raw_spin_lock_irqsave(&desc->lock, flags); in irq_affinity_notify()
525 if (irq_move_pending(&desc->irq_data)) in irq_affinity_notify()
526 irq_get_pending(cpumask, desc); in irq_affinity_notify()
528 cpumask_copy(cpumask, desc->irq_common_data.affinity); in irq_affinity_notify()
529 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_affinity_notify()
552 struct irq_desc *desc = irq_to_desc(irq); in irq_set_affinity_notifier() local
559 if (!desc || desc->istate & IRQS_NMI) in irq_set_affinity_notifier()
569 raw_spin_lock_irqsave(&desc->lock, flags); in irq_set_affinity_notifier()
570 old_notify = desc->affinity_notify; in irq_set_affinity_notifier()
571 desc->affinity_notify = notify; in irq_set_affinity_notifier()
572 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_set_affinity_notifier()
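
The affinity-notifier matches above (kref_get() before schedule_work() in irq_set_affinity_locked(), the workqueue callback in irq_affinity_notify()) imply the registration pattern below. A hedged sketch; the my_* names are invented:

    #include <linux/interrupt.h>

    static void my_affinity_notify(struct irq_affinity_notify *notify,
                                   const cpumask_t *mask)
    {
            /* Runs from a workqueue after the affinity has changed. */
            pr_info("irq %u now targets CPUs %*pbl\n",
                    notify->irq, cpumask_pr_args(mask));
    }

    static void my_affinity_release(struct kref *ref)
    {
            /*
             * Balances the kref_get()/kref_put() pairing seen in
             * irq_set_affinity_locked(); nothing to free for this
             * statically allocated notifier.
             */
    }

    static struct irq_affinity_notify my_notify = {
            .notify  = my_affinity_notify,
            .release = my_affinity_release,
    };

    static int my_watch_affinity(unsigned int irq)      /* irq hypothetical */
    {
            /* Unregister later with irq_set_affinity_notifier(irq, NULL). */
            return irq_set_affinity_notifier(irq, &my_notify);
    }
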
590 int irq_setup_affinity(struct irq_desc *desc) in irq_setup_affinity() argument
593 int ret, node = irq_desc_get_node(desc); in irq_setup_affinity()
598 if (!__irq_can_set_affinity(desc)) in irq_setup_affinity()
606 if (irqd_affinity_is_managed(&desc->irq_data) || in irq_setup_affinity()
607 irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { in irq_setup_affinity()
608 if (cpumask_intersects(desc->irq_common_data.affinity, in irq_setup_affinity()
610 set = desc->irq_common_data.affinity; in irq_setup_affinity()
612 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); in irq_setup_affinity()
626 ret = irq_do_set_affinity(&desc->irq_data, &mask, false); in irq_setup_affinity()
632 int irq_setup_affinity(struct irq_desc *desc) in irq_setup_affinity() argument
634 return irq_select_affinity(irq_desc_get_irq(desc)); in irq_setup_affinity()
654 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in irq_set_vcpu_affinity() local
659 if (!desc) in irq_set_vcpu_affinity()
662 data = irq_desc_get_irq_data(desc); in irq_set_vcpu_affinity()
676 irq_put_desc_unlock(desc, flags); in irq_set_vcpu_affinity()
682 void __disable_irq(struct irq_desc *desc) in __disable_irq() argument
684 if (!desc->depth++) in __disable_irq()
685 irq_disable(desc); in __disable_irq()
691 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in __disable_irq_nosync() local
693 if (!desc) in __disable_irq_nosync()
695 __disable_irq(desc); in __disable_irq_nosync()
696 irq_put_desc_busunlock(desc, flags); in __disable_irq_nosync()
777 void __enable_irq(struct irq_desc *desc) in __enable_irq() argument
779 switch (desc->depth) { in __enable_irq()
783 irq_desc_get_irq(desc)); in __enable_irq()
786 if (desc->istate & IRQS_SUSPENDED) in __enable_irq()
789 irq_settings_set_noprobe(desc); in __enable_irq()
797 irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE); in __enable_irq()
801 desc->depth--; in __enable_irq()
819 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in enable_irq() local
821 if (!desc) in enable_irq()
823 if (WARN(!desc->irq_data.chip, in enable_irq()
827 __enable_irq(desc); in enable_irq()
829 irq_put_desc_busunlock(desc, flags); in enable_irq()
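
__disable_irq()/__enable_irq() above maintain desc->depth, so disable and enable calls nest; the hardware state only changes on the 0<->1 transitions. A short sketch of the balanced-pair rule; irq is hypothetical:

    #include <linux/interrupt.h>

    static void my_reprogram(unsigned int irq)
    {
            disable_irq(irq);   /* depth 0 -> 1; waits for running handlers */
            disable_irq(irq);   /* depth 1 -> 2; no hardware change */

            /* ... safely reprogram the device here ... */

            enable_irq(irq);    /* depth 2 -> 1; line still masked */
            enable_irq(irq);    /* depth 1 -> 0; irq_startup()/unmask */
    }
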
849 struct irq_desc *desc = irq_to_desc(irq); in set_irq_wake_real() local
852 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE) in set_irq_wake_real()
855 if (desc->irq_data.chip->irq_set_wake) in set_irq_wake_real()
856 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); in set_irq_wake_real()
883 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in irq_set_irq_wake() local
886 if (!desc) in irq_set_irq_wake()
890 if (desc->istate & IRQS_NMI) { in irq_set_irq_wake()
899 if (desc->wake_depth++ == 0) { in irq_set_irq_wake()
902 desc->wake_depth = 0; in irq_set_irq_wake()
904 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); in irq_set_irq_wake()
907 if (desc->wake_depth == 0) { in irq_set_irq_wake()
909 } else if (--desc->wake_depth == 0) { in irq_set_irq_wake()
912 desc->wake_depth = 1; in irq_set_irq_wake()
914 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); in irq_set_irq_wake()
919 irq_put_desc_busunlock(desc, flags); in irq_set_irq_wake()
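
irq_set_irq_wake() above keeps its own wake_depth reference count, reached from drivers through the enable_irq_wake()/disable_irq_wake() wrappers. A hedged suspend/resume sketch; my_dev is hypothetical:

    #include <linux/device.h>
    #include <linux/interrupt.h>
    #include <linux/pm_wakeup.h>

    struct my_dev { unsigned int irq; };        /* hypothetical */

    static int my_suspend(struct device *dev)
    {
            struct my_dev *md = dev_get_drvdata(dev);

            if (device_may_wakeup(dev))
                    enable_irq_wake(md->irq);   /* irq_set_irq_wake(irq, 1) */
            return 0;
    }

    static int my_resume(struct device *dev)
    {
            struct my_dev *md = dev_get_drvdata(dev);

            if (device_may_wakeup(dev))
                    disable_irq_wake(md->irq);  /* irq_set_irq_wake(irq, 0) */
            return 0;
    }
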
932 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in can_request_irq() local
935 if (!desc) in can_request_irq()
938 if (irq_settings_can_request(desc)) { in can_request_irq()
939 if (!desc->action || in can_request_irq()
940 irqflags & desc->action->flags & IRQF_SHARED) in can_request_irq()
943 irq_put_desc_unlock(desc, flags); in can_request_irq()
947 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags) in __irq_set_trigger() argument
949 struct irq_chip *chip = desc->irq_data.chip; in __irq_set_trigger()
958 irq_desc_get_irq(desc), in __irq_set_trigger()
964 if (!irqd_irq_masked(&desc->irq_data)) in __irq_set_trigger()
965 mask_irq(desc); in __irq_set_trigger()
966 if (!irqd_irq_disabled(&desc->irq_data)) in __irq_set_trigger()
972 ret = chip->irq_set_type(&desc->irq_data, flags); in __irq_set_trigger()
977 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); in __irq_set_trigger()
978 irqd_set(&desc->irq_data, flags); in __irq_set_trigger()
982 flags = irqd_get_trigger_type(&desc->irq_data); in __irq_set_trigger()
983 irq_settings_set_trigger_mask(desc, flags); in __irq_set_trigger()
984 irqd_clear(&desc->irq_data, IRQD_LEVEL); in __irq_set_trigger()
985 irq_settings_clr_level(desc); in __irq_set_trigger()
987 irq_settings_set_level(desc); in __irq_set_trigger()
988 irqd_set(&desc->irq_data, IRQD_LEVEL); in __irq_set_trigger()
995 flags, irq_desc_get_irq(desc), chip->irq_set_type); in __irq_set_trigger()
998 unmask_irq(desc); in __irq_set_trigger()
1006 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in irq_set_parent() local
1008 if (!desc) in irq_set_parent()
1011 desc->parent_irq = parent_irq; in irq_set_parent()
1013 irq_put_desc_unlock(desc, flags); in irq_set_parent()
1075 static void irq_finalize_oneshot(struct irq_desc *desc, in irq_finalize_oneshot() argument
1078 if (!(desc->istate & IRQS_ONESHOT) || in irq_finalize_oneshot()
1082 chip_bus_lock(desc); in irq_finalize_oneshot()
1083 raw_spin_lock_irq(&desc->lock); in irq_finalize_oneshot()
1099 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { in irq_finalize_oneshot()
1100 raw_spin_unlock_irq(&desc->lock); in irq_finalize_oneshot()
1101 chip_bus_sync_unlock(desc); in irq_finalize_oneshot()
1114 desc->threads_oneshot &= ~action->thread_mask; in irq_finalize_oneshot()
1116 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && in irq_finalize_oneshot()
1117 irqd_irq_masked(&desc->irq_data)) in irq_finalize_oneshot()
1118 unmask_threaded_irq(desc); in irq_finalize_oneshot()
1121 raw_spin_unlock_irq(&desc->lock); in irq_finalize_oneshot()
1122 chip_bus_sync_unlock(desc); in irq_finalize_oneshot()
1130 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) in irq_thread_check_affinity() argument
1147 raw_spin_lock_irq(&desc->lock); in irq_thread_check_affinity()
1152 if (cpumask_available(desc->irq_common_data.affinity)) { in irq_thread_check_affinity()
1155 m = irq_data_get_effective_affinity_mask(&desc->irq_data); in irq_thread_check_affinity()
1160 raw_spin_unlock_irq(&desc->lock); in irq_thread_check_affinity()
1168 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } in irq_thread_check_affinity() argument
1178 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) in irq_forced_thread_fn() argument
1187 atomic_inc(&desc->threads_handled); in irq_forced_thread_fn()
1189 irq_finalize_oneshot(desc, action); in irq_forced_thread_fn()
1201 static irqreturn_t irq_thread_fn(struct irq_desc *desc, in irq_thread_fn() argument
1208 atomic_inc(&desc->threads_handled); in irq_thread_fn()
1210 irq_finalize_oneshot(desc, action); in irq_thread_fn()
1214 static void wake_threads_waitq(struct irq_desc *desc) in wake_threads_waitq() argument
1216 if (atomic_dec_and_test(&desc->threads_active)) in wake_threads_waitq()
1217 wake_up(&desc->wait_for_threads); in wake_threads_waitq()
1223 struct irq_desc *desc; in irq_thread_dtor() local
1235 desc = irq_to_desc(action->irq); in irq_thread_dtor()
1241 wake_threads_waitq(desc); in irq_thread_dtor()
1244 irq_finalize_oneshot(desc, action); in irq_thread_dtor()
1247 static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action) in irq_wake_secondary() argument
1254 raw_spin_lock_irq(&desc->lock); in irq_wake_secondary()
1255 __irq_wake_thread(desc, secondary); in irq_wake_secondary()
1256 raw_spin_unlock_irq(&desc->lock); in irq_wake_secondary()
1262 static void irq_thread_set_ready(struct irq_desc *desc, in irq_thread_set_ready() argument
1266 wake_up(&desc->wait_for_threads); in irq_thread_set_ready()
1273 static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc, in wake_up_and_wait_for_irq_thread_ready() argument
1280 wait_event(desc->wait_for_threads, in wake_up_and_wait_for_irq_thread_ready()
1291 struct irq_desc *desc = irq_to_desc(action->irq); in irq_thread() local
1292 irqreturn_t (*handler_fn)(struct irq_desc *desc, in irq_thread()
1295 irq_thread_set_ready(desc, action); in irq_thread()
1308 irq_thread_check_affinity(desc, action); in irq_thread()
1313 irq_thread_check_affinity(desc, action); in irq_thread()
1315 action_ret = handler_fn(desc, action); in irq_thread()
1317 irq_wake_secondary(desc, action); in irq_thread()
1319 wake_threads_waitq(desc); in irq_thread()
1340 struct irq_desc *desc = irq_to_desc(irq); in irq_wake_thread() local
1344 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) in irq_wake_thread()
1347 raw_spin_lock_irqsave(&desc->lock, flags); in irq_wake_thread()
1348 for_each_action_of_desc(desc, action) { in irq_wake_thread()
1351 __irq_wake_thread(desc, action); in irq_wake_thread()
1355 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_wake_thread()
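
irq_wake_thread() above lets a driver kick its own threaded handler for a given dev_id from outside hard interrupt context (note the WARN excluding per-cpu-devid lines). A minimal hypothetical use, for example from a timeout path:

    #include <linux/interrupt.h>

    /* Run the threaded handler registered for (irq, dev) as if the
     * interrupt had fired; both arguments are hypothetical. */
    static void my_timeout_kick(unsigned int irq, void *dev)
    {
            irq_wake_thread(irq, dev);
    }
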
1398 static int irq_request_resources(struct irq_desc *desc) in irq_request_resources() argument
1400 struct irq_data *d = &desc->irq_data; in irq_request_resources()
1406 static void irq_release_resources(struct irq_desc *desc) in irq_release_resources() argument
1408 struct irq_data *d = &desc->irq_data; in irq_release_resources()
1415 static bool irq_supports_nmi(struct irq_desc *desc) in irq_supports_nmi() argument
1417 struct irq_data *d = irq_desc_get_irq_data(desc); in irq_supports_nmi()
1431 static int irq_nmi_setup(struct irq_desc *desc) in irq_nmi_setup() argument
1433 struct irq_data *d = irq_desc_get_irq_data(desc); in irq_nmi_setup()
1439 static void irq_nmi_teardown(struct irq_desc *desc) in irq_nmi_teardown() argument
1441 struct irq_data *d = irq_desc_get_irq_data(desc); in irq_nmi_teardown()
1498 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) in __setup_irq() argument
1504 if (!desc) in __setup_irq()
1507 if (desc->irq_data.chip == &no_irq_chip) in __setup_irq()
1509 if (!try_module_get(desc->owner)) in __setup_irq()
1519 new->flags |= irqd_get_trigger_type(&desc->irq_data); in __setup_irq()
1525 nested = irq_settings_is_nested_thread(desc); in __setup_irq()
1538 if (irq_settings_can_thread(desc)) { in __setup_irq()
1570 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE) in __setup_irq()
1580 mutex_lock(&desc->request_mutex); in __setup_irq()
1587 chip_bus_lock(desc); in __setup_irq()
1590 if (!desc->action) { in __setup_irq()
1591 ret = irq_request_resources(desc); in __setup_irq()
1594 new->name, irq, desc->irq_data.chip->name); in __setup_irq()
1605 raw_spin_lock_irqsave(&desc->lock, flags); in __setup_irq()
1606 old_ptr = &desc->action; in __setup_irq()
1619 if (desc->istate & IRQS_NMI) { in __setup_irq()
1621 new->name, irq, desc->irq_data.chip->name); in __setup_irq()
1630 if (irqd_trigger_type_was_set(&desc->irq_data)) { in __setup_irq()
1631 oldtype = irqd_get_trigger_type(&desc->irq_data); in __setup_irq()
1634 irqd_set_trigger_type(&desc->irq_data, oldtype); in __setup_irq()
1698 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) { in __setup_irq()
1723 ret = __irq_set_trigger(desc, in __setup_irq()
1741 ret = irq_activate(desc); in __setup_irq()
1745 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ in __setup_irq()
1747 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); in __setup_irq()
1750 irqd_set(&desc->irq_data, IRQD_PER_CPU); in __setup_irq()
1751 irq_settings_set_per_cpu(desc); in __setup_irq()
1753 irq_settings_set_no_debug(desc); in __setup_irq()
1757 irq_settings_set_no_debug(desc); in __setup_irq()
1760 desc->istate |= IRQS_ONESHOT; in __setup_irq()
1764 irq_settings_set_no_balancing(desc); in __setup_irq()
1765 irqd_set(&desc->irq_data, IRQD_NO_BALANCING); in __setup_irq()
1769 irq_settings_can_autoenable(desc)) { in __setup_irq()
1770 irq_startup(desc, IRQ_RESEND, IRQ_START_COND); in __setup_irq()
1780 desc->depth = 1; in __setup_irq()
1785 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data); in __setup_irq()
1795 irq_pm_install_action(desc, new); in __setup_irq()
1798 desc->irq_count = 0; in __setup_irq()
1799 desc->irqs_unhandled = 0; in __setup_irq()
1805 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { in __setup_irq()
1806 desc->istate &= ~IRQS_SPURIOUS_DISABLED; in __setup_irq()
1807 __enable_irq(desc); in __setup_irq()
1810 raw_spin_unlock_irqrestore(&desc->lock, flags); in __setup_irq()
1811 chip_bus_sync_unlock(desc); in __setup_irq()
1812 mutex_unlock(&desc->request_mutex); in __setup_irq()
1814 irq_setup_timings(desc, new); in __setup_irq()
1816 wake_up_and_wait_for_irq_thread_ready(desc, new); in __setup_irq()
1817 wake_up_and_wait_for_irq_thread_ready(desc, new->secondary); in __setup_irq()
1819 register_irq_proc(irq, desc); in __setup_irq()
1835 raw_spin_unlock_irqrestore(&desc->lock, flags); in __setup_irq()
1837 if (!desc->action) in __setup_irq()
1838 irq_release_resources(desc); in __setup_irq()
1840 chip_bus_sync_unlock(desc); in __setup_irq()
1841 mutex_unlock(&desc->request_mutex); in __setup_irq()
1859 module_put(desc->owner); in __setup_irq()
1867 static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id) in __free_irq() argument
1869 unsigned irq = desc->irq_data.irq; in __free_irq()
1875 mutex_lock(&desc->request_mutex); in __free_irq()
1876 chip_bus_lock(desc); in __free_irq()
1877 raw_spin_lock_irqsave(&desc->lock, flags); in __free_irq()
1883 action_ptr = &desc->action; in __free_irq()
1889 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_irq()
1890 chip_bus_sync_unlock(desc); in __free_irq()
1891 mutex_unlock(&desc->request_mutex); in __free_irq()
1903 irq_pm_remove_action(desc, action); in __free_irq()
1906 if (!desc->action) { in __free_irq()
1907 irq_settings_clr_disable_unlazy(desc); in __free_irq()
1909 irq_shutdown(desc); in __free_irq()
1914 if (WARN_ON_ONCE(desc->affinity_hint)) in __free_irq()
1915 desc->affinity_hint = NULL; in __free_irq()
1918 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_irq()
1933 chip_bus_sync_unlock(desc); in __free_irq()
1942 __synchronize_hardirq(desc, true); in __free_irq()
1976 if (!desc->action) { in __free_irq()
1981 chip_bus_lock(desc); in __free_irq()
1986 raw_spin_lock_irqsave(&desc->lock, flags); in __free_irq()
1987 irq_domain_deactivate_irq(&desc->irq_data); in __free_irq()
1988 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_irq()
1990 irq_release_resources(desc); in __free_irq()
1991 chip_bus_sync_unlock(desc); in __free_irq()
1992 irq_remove_timings(desc); in __free_irq()
1995 mutex_unlock(&desc->request_mutex); in __free_irq()
1997 irq_chip_pm_put(&desc->irq_data); in __free_irq()
1998 module_put(desc->owner); in __free_irq()
2021 struct irq_desc *desc = irq_to_desc(irq); in free_irq() local
2025 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) in free_irq()
2029 if (WARN_ON(desc->affinity_notify)) in free_irq()
2030 desc->affinity_notify = NULL; in free_irq()
2033 action = __free_irq(desc, dev_id); in free_irq()
2045 static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc) in __cleanup_nmi() argument
2049 desc->istate &= ~IRQS_NMI; in __cleanup_nmi()
2051 if (!WARN_ON(desc->action == NULL)) { in __cleanup_nmi()
2052 irq_pm_remove_action(desc, desc->action); in __cleanup_nmi()
2053 devname = desc->action->name; in __cleanup_nmi()
2054 unregister_handler_proc(irq, desc->action); in __cleanup_nmi()
2056 kfree(desc->action); in __cleanup_nmi()
2057 desc->action = NULL; in __cleanup_nmi()
2060 irq_settings_clr_disable_unlazy(desc); in __cleanup_nmi()
2061 irq_shutdown_and_deactivate(desc); in __cleanup_nmi()
2063 irq_release_resources(desc); in __cleanup_nmi()
2065 irq_chip_pm_put(&desc->irq_data); in __cleanup_nmi()
2066 module_put(desc->owner); in __cleanup_nmi()
2073 struct irq_desc *desc = irq_to_desc(irq); in free_nmi() local
2077 if (!desc || WARN_ON(!(desc->istate & IRQS_NMI))) in free_nmi()
2080 if (WARN_ON(irq_settings_is_per_cpu_devid(desc))) in free_nmi()
2084 if (WARN_ON(desc->depth == 0)) in free_nmi()
2087 raw_spin_lock_irqsave(&desc->lock, flags); in free_nmi()
2089 irq_nmi_teardown(desc); in free_nmi()
2090 devname = __cleanup_nmi(irq, desc); in free_nmi()
2092 raw_spin_unlock_irqrestore(&desc->lock, flags); in free_nmi()
2144 struct irq_desc *desc; in request_threaded_irq() local
2169 desc = irq_to_desc(irq); in request_threaded_irq()
2170 if (!desc) in request_threaded_irq()
2173 if (!irq_settings_can_request(desc) || in request_threaded_irq()
2174 WARN_ON(irq_settings_is_per_cpu_devid(desc))) in request_threaded_irq()
2193 retval = irq_chip_pm_get(&desc->irq_data); in request_threaded_irq()
2199 retval = __setup_irq(irq, desc, action); in request_threaded_irq()
2202 irq_chip_pm_put(&desc->irq_data); in request_threaded_irq()
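
request_threaded_irq() above is the main entry into __setup_irq(). A hedged sketch of the split-handler pattern it supports: a fast hard handler that acknowledges the device, and a sleepable thread doing the heavy work, with IRQF_ONESHOT keeping the line masked until the thread finishes (see irq_finalize_oneshot() earlier). All my_* names are invented:

    #include <linux/interrupt.h>

    static irqreturn_t my_hardirq(int irq, void *dev_id)
    {
            /* Hard irq context: quick check/ack only, no sleeping. */
            return IRQ_WAKE_THREAD;
    }

    static irqreturn_t my_thread_fn(int irq, void *dev_id)
    {
            /* Process context: may sleep, e.g. read over I2C/SPI. */
            return IRQ_HANDLED;
    }

    static int my_probe(unsigned int irq, void *dev)
    {
            return request_threaded_irq(irq, my_hardirq, my_thread_fn,
                                        IRQF_ONESHOT, "my-device", dev);
    }

    static void my_remove(unsigned int irq, void *dev)
    {
            free_irq(irq, dev);         /* __free_irq() waits for the thread */
    }
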
2250 struct irq_desc *desc; in request_any_context_irq() local
2256 desc = irq_to_desc(irq); in request_any_context_irq()
2257 if (!desc) in request_any_context_irq()
2260 if (irq_settings_is_nested_thread(desc)) { in request_any_context_irq()
2301 struct irq_desc *desc; in request_nmi() local
2318 desc = irq_to_desc(irq); in request_nmi()
2320 if (!desc || (irq_settings_can_autoenable(desc) && in request_nmi()
2322 !irq_settings_can_request(desc) || in request_nmi()
2323 WARN_ON(irq_settings_is_per_cpu_devid(desc)) || in request_nmi()
2324 !irq_supports_nmi(desc)) in request_nmi()
2336 retval = irq_chip_pm_get(&desc->irq_data); in request_nmi()
2340 retval = __setup_irq(irq, desc, action); in request_nmi()
2344 raw_spin_lock_irqsave(&desc->lock, flags); in request_nmi()
2347 desc->istate |= IRQS_NMI; in request_nmi()
2348 retval = irq_nmi_setup(desc); in request_nmi()
2350 __cleanup_nmi(irq, desc); in request_nmi()
2351 raw_spin_unlock_irqrestore(&desc->lock, flags); in request_nmi()
2355 raw_spin_unlock_irqrestore(&desc->lock, flags); in request_nmi()
2360 irq_chip_pm_put(&desc->irq_data); in request_nmi()
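
request_nmi() above rejects lines that are shared, per-cpu-devid, auto-enabled (unless IRQF_NO_AUTOEN is passed) or not NMI-capable, then flips the descriptor into NMI mode under the lock. A heavily hedged sketch, assuming the explicit enable_irq() start that the no-auto-enable rule implies; my_* names are invented:

    #include <linux/interrupt.h>

    static irqreturn_t my_nmi_handler(int irq, void *dev_id)
    {
            /* NMI context: no locks, no sleeping, minimal work. */
            return IRQ_HANDLED;
    }

    static int my_nmi_setup(unsigned int irq, void *dev)
    {
            int ret = request_nmi(irq, my_nmi_handler, IRQF_NO_AUTOEN,
                                  "my-nmi", dev);

            if (!ret)
                    enable_irq(irq);    /* NMI lines are not auto-enabled */
            return ret;
            /* Tear down later with free_nmi(irq, dev). */
    }
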
2371 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); in enable_percpu_irq() local
2373 if (!desc) in enable_percpu_irq()
2382 type = irqd_get_trigger_type(&desc->irq_data); in enable_percpu_irq()
2387 ret = __irq_set_trigger(desc, type); in enable_percpu_irq()
2395 irq_percpu_enable(desc, cpu); in enable_percpu_irq()
2397 irq_put_desc_unlock(desc, flags); in enable_percpu_irq()
2416 struct irq_desc *desc; in irq_percpu_is_enabled() local
2420 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); in irq_percpu_is_enabled()
2421 if (!desc) in irq_percpu_is_enabled()
2424 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled); in irq_percpu_is_enabled()
2425 irq_put_desc_unlock(desc, flags); in irq_percpu_is_enabled()
2435 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); in disable_percpu_irq() local
2437 if (!desc) in disable_percpu_irq()
2440 irq_percpu_disable(desc, cpu); in disable_percpu_irq()
2441 irq_put_desc_unlock(desc, flags); in disable_percpu_irq()
2455 struct irq_desc *desc = irq_to_desc(irq); in __free_percpu_irq() local
2461 if (!desc) in __free_percpu_irq()
2464 raw_spin_lock_irqsave(&desc->lock, flags); in __free_percpu_irq()
2466 action = desc->action; in __free_percpu_irq()
2472 if (!cpumask_empty(desc->percpu_enabled)) { in __free_percpu_irq()
2474 irq, cpumask_first(desc->percpu_enabled)); in __free_percpu_irq()
2479 desc->action = NULL; in __free_percpu_irq()
2481 desc->istate &= ~IRQS_NMI; in __free_percpu_irq()
2483 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_percpu_irq()
2487 irq_chip_pm_put(&desc->irq_data); in __free_percpu_irq()
2488 module_put(desc->owner); in __free_percpu_irq()
2492 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_percpu_irq()
2505 struct irq_desc *desc = irq_to_desc(irq); in remove_percpu_irq() local
2507 if (desc && irq_settings_is_per_cpu_devid(desc)) in remove_percpu_irq()
2525 struct irq_desc *desc = irq_to_desc(irq); in free_percpu_irq() local
2527 if (!desc || !irq_settings_is_per_cpu_devid(desc)) in free_percpu_irq()
2530 chip_bus_lock(desc); in free_percpu_irq()
2532 chip_bus_sync_unlock(desc); in free_percpu_irq()
2538 struct irq_desc *desc = irq_to_desc(irq); in free_percpu_nmi() local
2540 if (!desc || !irq_settings_is_per_cpu_devid(desc)) in free_percpu_nmi()
2543 if (WARN_ON(!(desc->istate & IRQS_NMI))) in free_percpu_nmi()
2558 struct irq_desc *desc = irq_to_desc(irq); in setup_percpu_irq() local
2561 if (!desc || !irq_settings_is_per_cpu_devid(desc)) in setup_percpu_irq()
2564 retval = irq_chip_pm_get(&desc->irq_data); in setup_percpu_irq()
2568 retval = __setup_irq(irq, desc, act); in setup_percpu_irq()
2571 irq_chip_pm_put(&desc->irq_data); in setup_percpu_irq()
2598 struct irq_desc *desc; in __request_percpu_irq() local
2604 desc = irq_to_desc(irq); in __request_percpu_irq()
2605 if (!desc || !irq_settings_can_request(desc) || in __request_percpu_irq()
2606 !irq_settings_is_per_cpu_devid(desc)) in __request_percpu_irq()
2621 retval = irq_chip_pm_get(&desc->irq_data); in __request_percpu_irq()
2627 retval = __setup_irq(irq, desc, action); in __request_percpu_irq()
2630 irq_chip_pm_put(&desc->irq_data); in __request_percpu_irq()
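
The per-CPU variants above take a __percpu dev_id and are enabled per CPU rather than globally. A hedged sketch combining request_percpu_irq() with enable_percpu_irq(), which acts on the calling CPU only; my_* names are invented:

    #include <linux/interrupt.h>
    #include <linux/irq.h>              /* IRQ_TYPE_NONE */
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, my_counter);

    static irqreturn_t my_percpu_handler(int irq, void *dev_id)
    {
            unsigned long *cnt = dev_id;        /* this CPU's instance */

            (*cnt)++;
            return IRQ_HANDLED;
    }

    static int my_percpu_setup(unsigned int irq)
    {
            int ret = request_percpu_irq(irq, my_percpu_handler,
                                         "my-percpu", &my_counter);
            if (ret)
                    return ret;

            /*
             * Affects only the local CPU; each CPU that should receive
             * the interrupt runs this, e.g. from a cpuhp callback.
             */
            enable_percpu_irq(irq, IRQ_TYPE_NONE);
            return 0;
    }
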
2663 struct irq_desc *desc; in request_percpu_nmi() local
2670 desc = irq_to_desc(irq); in request_percpu_nmi()
2672 if (!desc || !irq_settings_can_request(desc) || in request_percpu_nmi()
2673 !irq_settings_is_per_cpu_devid(desc) || in request_percpu_nmi()
2674 irq_settings_can_autoenable(desc) || in request_percpu_nmi()
2675 !irq_supports_nmi(desc)) in request_percpu_nmi()
2679 if (desc->istate & IRQS_NMI) in request_percpu_nmi()
2692 retval = irq_chip_pm_get(&desc->irq_data); in request_percpu_nmi()
2696 retval = __setup_irq(irq, desc, action); in request_percpu_nmi()
2700 raw_spin_lock_irqsave(&desc->lock, flags); in request_percpu_nmi()
2701 desc->istate |= IRQS_NMI; in request_percpu_nmi()
2702 raw_spin_unlock_irqrestore(&desc->lock, flags); in request_percpu_nmi()
2707 irq_chip_pm_put(&desc->irq_data); in request_percpu_nmi()
2730 struct irq_desc *desc; in prepare_percpu_nmi() local
2735 desc = irq_get_desc_lock(irq, &flags, in prepare_percpu_nmi()
2737 if (!desc) in prepare_percpu_nmi()
2740 if (WARN(!(desc->istate & IRQS_NMI), in prepare_percpu_nmi()
2747 ret = irq_nmi_setup(desc); in prepare_percpu_nmi()
2754 irq_put_desc_unlock(desc, flags); in prepare_percpu_nmi()
2773 struct irq_desc *desc; in teardown_percpu_nmi() local
2777 desc = irq_get_desc_lock(irq, &flags, in teardown_percpu_nmi()
2779 if (!desc) in teardown_percpu_nmi()
2782 if (WARN_ON(!(desc->istate & IRQS_NMI))) in teardown_percpu_nmi()
2785 irq_nmi_teardown(desc); in teardown_percpu_nmi()
2787 irq_put_desc_unlock(desc, flags); in teardown_percpu_nmi()
2830 struct irq_desc *desc; in irq_get_irqchip_state() local
2835 desc = irq_get_desc_buslock(irq, &flags, 0); in irq_get_irqchip_state()
2836 if (!desc) in irq_get_irqchip_state()
2839 data = irq_desc_get_irq_data(desc); in irq_get_irqchip_state()
2843 irq_put_desc_busunlock(desc, flags); in irq_get_irqchip_state()
2863 struct irq_desc *desc; in irq_set_irqchip_state() local
2869 desc = irq_get_desc_buslock(irq, &flags, 0); in irq_set_irqchip_state()
2870 if (!desc) in irq_set_irqchip_state()
2873 data = irq_desc_get_irq_data(desc); in irq_set_irqchip_state()
2894 irq_put_desc_busunlock(desc, flags); in irq_set_irqchip_state()
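
irq_get_irqchip_state()/irq_set_irqchip_state() above let code inspect or adjust the state machine inside the irqchip itself. A hedged sketch that clears a stale latched-pending bit; irq is hypothetical:

    #include <linux/interrupt.h>

    static void my_clear_stale_pending(unsigned int irq)
    {
            bool pending = false;

            if (!irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending) &&
                pending)
                    irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
    }
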
2925 struct irq_desc *desc; in irq_check_status_bit() local
2929 desc = irq_to_desc(irq); in irq_check_status_bit()
2930 if (desc) in irq_check_status_bit()
2931 res = !!(desc->status_use_accessors & bitmask); in irq_check_status_bit()