Lines matching refs:desc in kernel/irq/manage.c (the Linux kernel's generic IRQ management core)

39 static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)  in __synchronize_hardirq()  argument
41 struct irq_data *irqd = irq_desc_get_irq_data(desc); in __synchronize_hardirq()
51 while (irqd_irq_inprogress(&desc->irq_data)) in __synchronize_hardirq()
55 raw_spin_lock_irqsave(&desc->lock, flags); in __synchronize_hardirq()
56 inprogress = irqd_irq_inprogress(&desc->irq_data); in __synchronize_hardirq()
71 raw_spin_unlock_irqrestore(&desc->lock, flags); in __synchronize_hardirq()
101 struct irq_desc *desc = irq_to_desc(irq); in synchronize_hardirq() local
103 if (desc) { in synchronize_hardirq()
104 __synchronize_hardirq(desc, false); in synchronize_hardirq()
105 return !atomic_read(&desc->threads_active); in synchronize_hardirq()
129 struct irq_desc *desc = irq_to_desc(irq); in synchronize_irq() local
131 if (desc) { in synchronize_irq()
132 __synchronize_hardirq(desc, true); in synchronize_irq()
138 wait_event(desc->wait_for_threads, in synchronize_irq()
139 !atomic_read(&desc->threads_active)); in synchronize_irq()
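
The synchronize_hardirq()/synchronize_irq() lines above show the two levels of waiting: spinning until IRQD_IRQ_INPROGRESS clears, then sleeping on desc->wait_for_threads until threads_active reaches zero. A minimal driver-side sketch of the usual use; my_dev and my_dev_stop() are hypothetical names:

    #include <linux/interrupt.h>

    /* Hypothetical device state, for illustration only. */
    struct my_dev {
            unsigned int irq;
            bool stopped;
    };

    static void my_dev_stop(struct my_dev *dev)
    {
            dev->stopped = true;    /* handlers check this and bail out */
            /*
             * Wait for any handler that already started, including the
             * threaded part: synchronize_irq() sleeps on
             * desc->wait_for_threads until threads_active hits zero.
             * It can sleep, so never call it from this IRQ's own
             * handler (that would deadlock).
             */
            synchronize_irq(dev->irq);
    }
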
147 static bool __irq_can_set_affinity(struct irq_desc *desc) in __irq_can_set_affinity() argument
149 if (!desc || !irqd_can_balance(&desc->irq_data) || in __irq_can_set_affinity()
150 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) in __irq_can_set_affinity()
174 struct irq_desc *desc = irq_to_desc(irq); in irq_can_set_affinity_usr() local
176 return __irq_can_set_affinity(desc) && in irq_can_set_affinity_usr()
177 !irqd_affinity_is_managed(&desc->irq_data); in irq_can_set_affinity_usr()
189 void irq_set_thread_affinity(struct irq_desc *desc) in irq_set_thread_affinity() argument
193 for_each_action_of_desc(desc, action) in irq_set_thread_affinity()
224 struct irq_desc *desc = irq_data_to_desc(data); in irq_do_set_affinity() local
288 cpumask_copy(desc->irq_common_data.affinity, mask); in irq_do_set_affinity()
292 irq_set_thread_affinity(desc); in irq_do_set_affinity()
304 struct irq_desc *desc = irq_data_to_desc(data); in irq_set_affinity_pending() local
307 irq_copy_pending(desc, dest); in irq_set_affinity_pending()
336 struct irq_desc *desc = irq_data_to_desc(data); in irq_set_affinity_deactivated() local
351 cpumask_copy(desc->irq_common_data.affinity, mask); in irq_set_affinity_deactivated()
361 struct irq_desc *desc = irq_data_to_desc(data); in irq_set_affinity_locked() local
374 irq_copy_pending(desc, mask); in irq_set_affinity_locked()
377 if (desc->affinity_notify) { in irq_set_affinity_locked()
378 kref_get(&desc->affinity_notify->kref); in irq_set_affinity_locked()
379 if (!schedule_work(&desc->affinity_notify->work)) { in irq_set_affinity_locked()
381 kref_put(&desc->affinity_notify->kref, in irq_set_affinity_locked()
382 desc->affinity_notify->release); in irq_set_affinity_locked()
392 struct irq_desc *desc = irq_to_desc(irq); in __irq_set_affinity() local
396 if (!desc) in __irq_set_affinity()
399 raw_spin_lock_irqsave(&desc->lock, flags); in __irq_set_affinity()
400 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force); in __irq_set_affinity()
401 raw_spin_unlock_irqrestore(&desc->lock, flags); in __irq_set_affinity()
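
irq_set_affinity_locked() either applies the new mask directly via irq_do_set_affinity() or records it with irq_copy_pending() for deferred migration, and __irq_set_affinity() is the locked public entry point. A hedged sketch of pinning a line to one CPU through the exported wrapper; the helper name is hypothetical:

    #include <linux/interrupt.h>
    #include <linux/cpumask.h>

    static int my_pin_irq(unsigned int irq, unsigned int cpu)
    {
            /*
             * irq_set_affinity() takes desc->lock and calls
             * irq_set_affinity_locked() as shown above; it returns
             * -EINVAL if the chip has no irq_set_affinity callback.
             */
            return irq_set_affinity(irq, cpumask_of(cpu));
    }
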
408 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in irq_set_affinity_hint() local
410 if (!desc) in irq_set_affinity_hint()
412 desc->affinity_hint = m; in irq_set_affinity_hint()
413 irq_put_desc_unlock(desc, flags); in irq_set_affinity_hint()
425 struct irq_desc *desc = irq_to_desc(notify->irq); in irq_affinity_notify() local
429 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) in irq_affinity_notify()
432 raw_spin_lock_irqsave(&desc->lock, flags); in irq_affinity_notify()
433 if (irq_move_pending(&desc->irq_data)) in irq_affinity_notify()
434 irq_get_pending(cpumask, desc); in irq_affinity_notify()
436 cpumask_copy(cpumask, desc->irq_common_data.affinity); in irq_affinity_notify()
437 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_affinity_notify()
460 struct irq_desc *desc = irq_to_desc(irq); in irq_set_affinity_notifier() local
467 if (!desc || desc->istate & IRQS_NMI) in irq_set_affinity_notifier()
477 raw_spin_lock_irqsave(&desc->lock, flags); in irq_set_affinity_notifier()
478 old_notify = desc->affinity_notify; in irq_set_affinity_notifier()
479 desc->affinity_notify = notify; in irq_set_affinity_notifier()
480 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_set_affinity_notifier()
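
The notifier plumbing is reference counted: irq_set_affinity_locked() takes a kref and schedules notify->work, and irq_affinity_notify() copies either the pending or the current mask under desc->lock before calling back. A sketch of registering one, with hypothetical callback names and a statically embedded notifier:

    #include <linux/interrupt.h>
    #include <linux/kref.h>

    static void my_affinity_changed(struct irq_affinity_notify *notify,
                                    const cpumask_t *mask)
    {
            /* React to the new mask, e.g. re-home per-CPU buffers. */
    }

    static void my_affinity_release(struct kref *ref)
    {
            /*
             * Last reference dropped. The notifier here is static, so
             * there is nothing to kfree(); a dynamically allocated one
             * would be freed in this callback.
             */
    }

    static struct irq_affinity_notify my_notify = {
            .notify  = my_affinity_changed,
            .release = my_affinity_release,
    };

    /* Pass NULL later to unregister; required before free_irq(). */
    static int my_register_notifier(unsigned int irq)
    {
            return irq_set_affinity_notifier(irq, &my_notify);
    }
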
498 int irq_setup_affinity(struct irq_desc *desc) in irq_setup_affinity() argument
501 int ret, node = irq_desc_get_node(desc); in irq_setup_affinity()
506 if (!__irq_can_set_affinity(desc)) in irq_setup_affinity()
514 if (irqd_affinity_is_managed(&desc->irq_data) || in irq_setup_affinity()
515 irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { in irq_setup_affinity()
516 if (cpumask_intersects(desc->irq_common_data.affinity, in irq_setup_affinity()
518 set = desc->irq_common_data.affinity; in irq_setup_affinity()
520 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); in irq_setup_affinity()
534 ret = irq_do_set_affinity(&desc->irq_data, &mask, false); in irq_setup_affinity()
540 int irq_setup_affinity(struct irq_desc *desc) in irq_setup_affinity() argument
542 return irq_select_affinity(irq_desc_get_irq(desc)); in irq_setup_affinity()
562 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in irq_set_vcpu_affinity() local
567 if (!desc) in irq_set_vcpu_affinity()
570 data = irq_desc_get_irq_data(desc); in irq_set_vcpu_affinity()
584 irq_put_desc_unlock(desc, flags); in irq_set_vcpu_affinity()
590 void __disable_irq(struct irq_desc *desc) in __disable_irq() argument
592 if (!desc->depth++) in __disable_irq()
593 irq_disable(desc); in __disable_irq()
599 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in __disable_irq_nosync() local
601 if (!desc) in __disable_irq_nosync()
603 __disable_irq(desc); in __disable_irq_nosync()
604 irq_put_desc_busunlock(desc, flags); in __disable_irq_nosync()
685 void __enable_irq(struct irq_desc *desc) in __enable_irq() argument
687 switch (desc->depth) { in __enable_irq()
691 irq_desc_get_irq(desc)); in __enable_irq()
694 if (desc->istate & IRQS_SUSPENDED) in __enable_irq()
697 irq_settings_set_noprobe(desc); in __enable_irq()
705 irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE); in __enable_irq()
709 desc->depth--; in __enable_irq()
727 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in enable_irq() local
729 if (!desc) in enable_irq()
731 if (WARN(!desc->irq_data.chip, in enable_irq()
735 __enable_irq(desc); in enable_irq()
737 irq_put_desc_busunlock(desc, flags); in enable_irq()
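
__disable_irq() and __enable_irq() implement a nesting count: the !desc->depth++ test masks only on the 0 to 1 transition, and the switch on desc->depth starts the line again only when the final enable brings it back to 0 (an extra enable trips the "Unbalanced enable" warning). A short sketch of the nesting behavior:

    #include <linux/interrupt.h>

    static void my_critical_update(unsigned int irq)
    {
            disable_irq(irq);       /* depth 0 -> 1: line masked       */
            disable_irq(irq);       /* depth 1 -> 2: no hardware op    */

            /* ... reprogram the device safely ... */

            enable_irq(irq);        /* depth 2 -> 1: still masked      */
            enable_irq(irq);        /* depth 1 -> 0: irq_startup() run */
    }
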
757 struct irq_desc *desc = irq_to_desc(irq); in set_irq_wake_real() local
760 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE) in set_irq_wake_real()
763 if (desc->irq_data.chip->irq_set_wake) in set_irq_wake_real()
764 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); in set_irq_wake_real()
791 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in irq_set_irq_wake() local
794 if (!desc) in irq_set_irq_wake()
798 if (desc->istate & IRQS_NMI) { in irq_set_irq_wake()
807 if (desc->wake_depth++ == 0) { in irq_set_irq_wake()
810 desc->wake_depth = 0; in irq_set_irq_wake()
812 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); in irq_set_irq_wake()
815 if (desc->wake_depth == 0) { in irq_set_irq_wake()
817 } else if (--desc->wake_depth == 0) { in irq_set_irq_wake()
820 desc->wake_depth = 1; in irq_set_irq_wake()
822 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); in irq_set_irq_wake()
827 irq_put_desc_busunlock(desc, flags); in irq_set_irq_wake()
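
irq_set_irq_wake() nests the same way through desc->wake_depth and rolls the count back if set_irq_wake_real() fails; note the IRQCHIP_SKIP_SET_WAKE shortcut and the IRQS_NMI rejection above. Drivers normally reach it via the enable_irq_wake()/disable_irq_wake() wrappers, sketched here for a hypothetical suspend/resume pair:

    #include <linux/interrupt.h>

    static int my_suspend(unsigned int wake_irq)
    {
            /* wake_depth 0 -> 1: chip->irq_set_wake() is programmed
             * (skipped for chips flagged IRQCHIP_SKIP_SET_WAKE). */
            return enable_irq_wake(wake_irq);
    }

    static int my_resume(unsigned int wake_irq)
    {
            /* wake_depth 1 -> 0: IRQD_WAKEUP_STATE is cleared. */
            return disable_irq_wake(wake_irq);
    }
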
840 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in can_request_irq() local
843 if (!desc) in can_request_irq()
846 if (irq_settings_can_request(desc)) { in can_request_irq()
847 if (!desc->action || in can_request_irq()
848 irqflags & desc->action->flags & IRQF_SHARED) in can_request_irq()
851 irq_put_desc_unlock(desc, flags); in can_request_irq()
855 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags) in __irq_set_trigger() argument
857 struct irq_chip *chip = desc->irq_data.chip; in __irq_set_trigger()
866 irq_desc_get_irq(desc), in __irq_set_trigger()
872 if (!irqd_irq_masked(&desc->irq_data)) in __irq_set_trigger()
873 mask_irq(desc); in __irq_set_trigger()
874 if (!irqd_irq_disabled(&desc->irq_data)) in __irq_set_trigger()
880 ret = chip->irq_set_type(&desc->irq_data, flags); in __irq_set_trigger()
885 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); in __irq_set_trigger()
886 irqd_set(&desc->irq_data, flags); in __irq_set_trigger()
890 flags = irqd_get_trigger_type(&desc->irq_data); in __irq_set_trigger()
891 irq_settings_set_trigger_mask(desc, flags); in __irq_set_trigger()
892 irqd_clear(&desc->irq_data, IRQD_LEVEL); in __irq_set_trigger()
893 irq_settings_clr_level(desc); in __irq_set_trigger()
895 irq_settings_set_level(desc); in __irq_set_trigger()
896 irqd_set(&desc->irq_data, IRQD_LEVEL); in __irq_set_trigger()
903 flags, irq_desc_get_irq(desc), chip->irq_set_type); in __irq_set_trigger()
906 unmask_irq(desc); in __irq_set_trigger()
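
__irq_set_trigger() masks the line when the chip requires it, calls chip->irq_set_type(), and then rewrites the IRQD trigger bits and the level/edge settings to whatever type the chip actually accepted (the irqd_get_trigger_type() read-back above). From a driver the trigger usually arrives via request_irq() flags, but it can also be set explicitly; a sketch with a hypothetical helper:

    #include <linux/interrupt.h>
    #include <linux/irq.h>

    static int my_make_falling_edge(unsigned int irq)
    {
            /*
             * Lands in __irq_set_trigger(): fails if the chip has no
             * irq_set_type() callback, otherwise updates IRQD_LEVEL
             * and the trigger mask to match the programmed type.
             */
            return irq_set_irq_type(irq, IRQ_TYPE_EDGE_FALLING);
    }
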
914 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in irq_set_parent() local
916 if (!desc) in irq_set_parent()
919 desc->parent_irq = parent_irq; in irq_set_parent()
921 irq_put_desc_unlock(desc, flags); in irq_set_parent()
983 static void irq_finalize_oneshot(struct irq_desc *desc, in irq_finalize_oneshot() argument
986 if (!(desc->istate & IRQS_ONESHOT) || in irq_finalize_oneshot()
990 chip_bus_lock(desc); in irq_finalize_oneshot()
991 raw_spin_lock_irq(&desc->lock); in irq_finalize_oneshot()
1007 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { in irq_finalize_oneshot()
1008 raw_spin_unlock_irq(&desc->lock); in irq_finalize_oneshot()
1009 chip_bus_sync_unlock(desc); in irq_finalize_oneshot()
1022 desc->threads_oneshot &= ~action->thread_mask; in irq_finalize_oneshot()
1024 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && in irq_finalize_oneshot()
1025 irqd_irq_masked(&desc->irq_data)) in irq_finalize_oneshot()
1026 unmask_threaded_irq(desc); in irq_finalize_oneshot()
1029 raw_spin_unlock_irq(&desc->lock); in irq_finalize_oneshot()
1030 chip_bus_sync_unlock(desc); in irq_finalize_oneshot()
1038 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) in irq_thread_check_affinity() argument
1055 raw_spin_lock_irq(&desc->lock); in irq_thread_check_affinity()
1060 if (cpumask_available(desc->irq_common_data.affinity)) { in irq_thread_check_affinity()
1063 m = irq_data_get_effective_affinity_mask(&desc->irq_data); in irq_thread_check_affinity()
1068 raw_spin_unlock_irq(&desc->lock); in irq_thread_check_affinity()
1076 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } in irq_thread_check_affinity() argument
1086 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) in irq_forced_thread_fn() argument
1095 atomic_inc(&desc->threads_handled); in irq_forced_thread_fn()
1097 irq_finalize_oneshot(desc, action); in irq_forced_thread_fn()
1109 static irqreturn_t irq_thread_fn(struct irq_desc *desc, in irq_thread_fn() argument
1116 atomic_inc(&desc->threads_handled); in irq_thread_fn()
1118 irq_finalize_oneshot(desc, action); in irq_thread_fn()
1122 static void wake_threads_waitq(struct irq_desc *desc) in wake_threads_waitq() argument
1124 if (atomic_dec_and_test(&desc->threads_active)) in wake_threads_waitq()
1125 wake_up(&desc->wait_for_threads); in wake_threads_waitq()
1131 struct irq_desc *desc; in irq_thread_dtor() local
1143 desc = irq_to_desc(action->irq); in irq_thread_dtor()
1149 wake_threads_waitq(desc); in irq_thread_dtor()
1152 irq_finalize_oneshot(desc, action); in irq_thread_dtor()
1155 static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action) in irq_wake_secondary() argument
1162 raw_spin_lock_irq(&desc->lock); in irq_wake_secondary()
1163 __irq_wake_thread(desc, secondary); in irq_wake_secondary()
1164 raw_spin_unlock_irq(&desc->lock); in irq_wake_secondary()
1170 static void irq_thread_set_ready(struct irq_desc *desc, in irq_thread_set_ready() argument
1174 wake_up(&desc->wait_for_threads); in irq_thread_set_ready()
1181 static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc, in wake_up_and_wait_for_irq_thread_ready() argument
1188 wait_event(desc->wait_for_threads, in wake_up_and_wait_for_irq_thread_ready()
1199 struct irq_desc *desc = irq_to_desc(action->irq); in irq_thread() local
1200 irqreturn_t (*handler_fn)(struct irq_desc *desc, in irq_thread()
1203 irq_thread_set_ready(desc, action); in irq_thread()
1214 irq_thread_check_affinity(desc, action); in irq_thread()
1219 irq_thread_check_affinity(desc, action); in irq_thread()
1221 action_ret = handler_fn(desc, action); in irq_thread()
1223 irq_wake_secondary(desc, action); in irq_thread()
1225 wake_threads_waitq(desc); in irq_thread()
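
irq_thread() picks handler_fn (irq_forced_thread_fn() or irq_thread_fn()), counts completions in threads_handled, and balances threads_active through wake_threads_waitq(), which is exactly what synchronize_irq() sleeps on. The handlers themselves come from the driver; a hedged sketch of a typical primary/threaded pair (hypothetical names):

    #include <linux/interrupt.h>

    /* Primary handler: hard-IRQ context, quick check and defer. */
    static irqreturn_t my_hardirq(int irq, void *dev_id)
    {
            /* ... read a status register, ack the device ... */
            return IRQ_WAKE_THREAD;         /* kicks irq_thread() */
    }

    /* Threaded handler: runs in irq_thread() context, may sleep. */
    static irqreturn_t my_thread_fn(int irq, void *dev_id)
    {
            /* ... slow work: bus transfers, mutexes, allocations ... */
            return IRQ_HANDLED;             /* bumps threads_handled */
    }
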
1246 struct irq_desc *desc = irq_to_desc(irq); in irq_wake_thread() local
1250 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) in irq_wake_thread()
1253 raw_spin_lock_irqsave(&desc->lock, flags); in irq_wake_thread()
1254 for_each_action_of_desc(desc, action) { in irq_wake_thread()
1257 __irq_wake_thread(desc, action); in irq_wake_thread()
1261 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_wake_thread()
1304 static int irq_request_resources(struct irq_desc *desc) in irq_request_resources() argument
1306 struct irq_data *d = &desc->irq_data; in irq_request_resources()
1312 static void irq_release_resources(struct irq_desc *desc) in irq_release_resources() argument
1314 struct irq_data *d = &desc->irq_data; in irq_release_resources()
1321 static bool irq_supports_nmi(struct irq_desc *desc) in irq_supports_nmi() argument
1323 struct irq_data *d = irq_desc_get_irq_data(desc); in irq_supports_nmi()
1337 static int irq_nmi_setup(struct irq_desc *desc) in irq_nmi_setup() argument
1339 struct irq_data *d = irq_desc_get_irq_data(desc); in irq_nmi_setup()
1345 static void irq_nmi_teardown(struct irq_desc *desc) in irq_nmi_teardown() argument
1347 struct irq_data *d = irq_desc_get_irq_data(desc); in irq_nmi_teardown()
1406 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) in __setup_irq() argument
1412 if (!desc) in __setup_irq()
1415 if (desc->irq_data.chip == &no_irq_chip) in __setup_irq()
1417 if (!try_module_get(desc->owner)) in __setup_irq()
1427 new->flags |= irqd_get_trigger_type(&desc->irq_data); in __setup_irq()
1433 nested = irq_settings_is_nested_thread(desc); in __setup_irq()
1446 if (irq_settings_can_thread(desc)) { in __setup_irq()
1478 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE) in __setup_irq()
1488 mutex_lock(&desc->request_mutex); in __setup_irq()
1495 chip_bus_lock(desc); in __setup_irq()
1498 if (!desc->action) { in __setup_irq()
1499 ret = irq_request_resources(desc); in __setup_irq()
1502 new->name, irq, desc->irq_data.chip->name); in __setup_irq()
1513 raw_spin_lock_irqsave(&desc->lock, flags); in __setup_irq()
1514 old_ptr = &desc->action; in __setup_irq()
1527 if (desc->istate & IRQS_NMI) { in __setup_irq()
1529 new->name, irq, desc->irq_data.chip->name); in __setup_irq()
1538 if (irqd_trigger_type_was_set(&desc->irq_data)) { in __setup_irq()
1539 oldtype = irqd_get_trigger_type(&desc->irq_data); in __setup_irq()
1542 irqd_set_trigger_type(&desc->irq_data, oldtype); in __setup_irq()
1606 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) { in __setup_irq()
1631 ret = __irq_set_trigger(desc, in __setup_irq()
1649 ret = irq_activate(desc); in __setup_irq()
1653 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ in __setup_irq()
1655 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); in __setup_irq()
1658 irqd_set(&desc->irq_data, IRQD_PER_CPU); in __setup_irq()
1659 irq_settings_set_per_cpu(desc); in __setup_irq()
1663 desc->istate |= IRQS_ONESHOT; in __setup_irq()
1667 irq_settings_set_no_balancing(desc); in __setup_irq()
1668 irqd_set(&desc->irq_data, IRQD_NO_BALANCING); in __setup_irq()
1672 irq_settings_can_autoenable(desc)) { in __setup_irq()
1673 irq_startup(desc, IRQ_RESEND, IRQ_START_COND); in __setup_irq()
1683 desc->depth = 1; in __setup_irq()
1688 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data); in __setup_irq()
1698 irq_pm_install_action(desc, new); in __setup_irq()
1701 desc->irq_count = 0; in __setup_irq()
1702 desc->irqs_unhandled = 0; in __setup_irq()
1708 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { in __setup_irq()
1709 desc->istate &= ~IRQS_SPURIOUS_DISABLED; in __setup_irq()
1710 __enable_irq(desc); in __setup_irq()
1713 raw_spin_unlock_irqrestore(&desc->lock, flags); in __setup_irq()
1714 chip_bus_sync_unlock(desc); in __setup_irq()
1715 mutex_unlock(&desc->request_mutex); in __setup_irq()
1717 irq_setup_timings(desc, new); in __setup_irq()
1719 wake_up_and_wait_for_irq_thread_ready(desc, new); in __setup_irq()
1720 wake_up_and_wait_for_irq_thread_ready(desc, new->secondary); in __setup_irq()
1722 register_irq_proc(irq, desc); in __setup_irq()
1738 raw_spin_unlock_irqrestore(&desc->lock, flags); in __setup_irq()
1740 if (!desc->action) in __setup_irq()
1741 irq_release_resources(desc); in __setup_irq()
1743 chip_bus_sync_unlock(desc); in __setup_irq()
1744 mutex_unlock(&desc->request_mutex); in __setup_irq()
1762 module_put(desc->owner); in __setup_irq()
1770 static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id) in __free_irq() argument
1772 unsigned irq = desc->irq_data.irq; in __free_irq()
1778 mutex_lock(&desc->request_mutex); in __free_irq()
1779 chip_bus_lock(desc); in __free_irq()
1780 raw_spin_lock_irqsave(&desc->lock, flags); in __free_irq()
1786 action_ptr = &desc->action; in __free_irq()
1792 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_irq()
1793 chip_bus_sync_unlock(desc); in __free_irq()
1794 mutex_unlock(&desc->request_mutex); in __free_irq()
1806 irq_pm_remove_action(desc, action); in __free_irq()
1809 if (!desc->action) { in __free_irq()
1810 irq_settings_clr_disable_unlazy(desc); in __free_irq()
1812 irq_shutdown(desc); in __free_irq()
1817 if (WARN_ON_ONCE(desc->affinity_hint)) in __free_irq()
1818 desc->affinity_hint = NULL; in __free_irq()
1821 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_irq()
1836 chip_bus_sync_unlock(desc); in __free_irq()
1845 __synchronize_hardirq(desc, true); in __free_irq()
1879 if (!desc->action) { in __free_irq()
1884 chip_bus_lock(desc); in __free_irq()
1889 raw_spin_lock_irqsave(&desc->lock, flags); in __free_irq()
1890 irq_domain_deactivate_irq(&desc->irq_data); in __free_irq()
1891 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_irq()
1893 irq_release_resources(desc); in __free_irq()
1894 chip_bus_sync_unlock(desc); in __free_irq()
1895 irq_remove_timings(desc); in __free_irq()
1898 mutex_unlock(&desc->request_mutex); in __free_irq()
1900 irq_chip_pm_put(&desc->irq_data); in __free_irq()
1901 module_put(desc->owner); in __free_irq()
1924 struct irq_desc *desc = irq_to_desc(irq); in free_irq() local
1928 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) in free_irq()
1932 if (WARN_ON(desc->affinity_notify)) in free_irq()
1933 desc->affinity_notify = NULL; in free_irq()
1936 action = __free_irq(desc, dev_id); in free_irq()
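
Note the teardown ordering the WARN_ONs above enforce: __free_irq() expects desc->affinity_hint to be cleared and free_irq() expects no affinity_notify left, after which __free_irq() waits out running handlers with __synchronize_hardirq(desc, true). A sketch honoring that order, with a hypothetical helper:

    #include <linux/interrupt.h>

    static void my_teardown(unsigned int irq, void *dev_id)
    {
            /* Clear the optional affinity plumbing first... */
            irq_set_affinity_notifier(irq, NULL);
            irq_set_affinity_hint(irq, NULL);

            /*
             * ...then free. free_irq() synchronizes with running
             * handlers and the IRQ thread, so dev_id and the data it
             * points to must stay valid until this returns.
             */
            free_irq(irq, dev_id);
    }
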
1948 static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc) in __cleanup_nmi() argument
1952 desc->istate &= ~IRQS_NMI; in __cleanup_nmi()
1954 if (!WARN_ON(desc->action == NULL)) { in __cleanup_nmi()
1955 irq_pm_remove_action(desc, desc->action); in __cleanup_nmi()
1956 devname = desc->action->name; in __cleanup_nmi()
1957 unregister_handler_proc(irq, desc->action); in __cleanup_nmi()
1959 kfree(desc->action); in __cleanup_nmi()
1960 desc->action = NULL; in __cleanup_nmi()
1963 irq_settings_clr_disable_unlazy(desc); in __cleanup_nmi()
1964 irq_shutdown_and_deactivate(desc); in __cleanup_nmi()
1966 irq_release_resources(desc); in __cleanup_nmi()
1968 irq_chip_pm_put(&desc->irq_data); in __cleanup_nmi()
1969 module_put(desc->owner); in __cleanup_nmi()
1976 struct irq_desc *desc = irq_to_desc(irq); in free_nmi() local
1980 if (!desc || WARN_ON(!(desc->istate & IRQS_NMI))) in free_nmi()
1983 if (WARN_ON(irq_settings_is_per_cpu_devid(desc))) in free_nmi()
1987 if (WARN_ON(desc->depth == 0)) in free_nmi()
1990 raw_spin_lock_irqsave(&desc->lock, flags); in free_nmi()
1992 irq_nmi_teardown(desc); in free_nmi()
1993 devname = __cleanup_nmi(irq, desc); in free_nmi()
1995 raw_spin_unlock_irqrestore(&desc->lock, flags); in free_nmi()
2047 struct irq_desc *desc; in request_threaded_irq() local
2072 desc = irq_to_desc(irq); in request_threaded_irq()
2073 if (!desc) in request_threaded_irq()
2076 if (!irq_settings_can_request(desc) || in request_threaded_irq()
2077 WARN_ON(irq_settings_is_per_cpu_devid(desc))) in request_threaded_irq()
2096 retval = irq_chip_pm_get(&desc->irq_data); in request_threaded_irq()
2102 retval = __setup_irq(irq, desc, action); in request_threaded_irq()
2105 irq_chip_pm_put(&desc->irq_data); in request_threaded_irq()
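
request_threaded_irq() takes a runtime-PM reference on the irqchip (irq_chip_pm_get()), hands off to __setup_irq(), and drops the reference on failure. A typical call, reusing the hypothetical my_thread_fn from the earlier sketch; a NULL primary handler is allowed, but then IRQF_ONESHOT is mandatory so the line stays masked until the thread finishes (the irq_finalize_oneshot() path):

    #include <linux/interrupt.h>

    static int my_probe_irq(unsigned int irq, void *dev_id)
    {
            /*
             * With handler == NULL the core installs a default primary
             * that just wakes the thread; __setup_irq() rejects that
             * combination without IRQF_ONESHOT.
             */
            return request_threaded_irq(irq, NULL, my_thread_fn,
                                        IRQF_ONESHOT, "my-dev", dev_id);
    }
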
2153 struct irq_desc *desc; in request_any_context_irq() local
2159 desc = irq_to_desc(irq); in request_any_context_irq()
2160 if (!desc) in request_any_context_irq()
2163 if (irq_settings_is_nested_thread(desc)) { in request_any_context_irq()
2204 struct irq_desc *desc; in request_nmi() local
2221 desc = irq_to_desc(irq); in request_nmi()
2223 if (!desc || (irq_settings_can_autoenable(desc) && in request_nmi()
2225 !irq_settings_can_request(desc) || in request_nmi()
2226 WARN_ON(irq_settings_is_per_cpu_devid(desc)) || in request_nmi()
2227 !irq_supports_nmi(desc)) in request_nmi()
2239 retval = irq_chip_pm_get(&desc->irq_data); in request_nmi()
2243 retval = __setup_irq(irq, desc, action); in request_nmi()
2247 raw_spin_lock_irqsave(&desc->lock, flags); in request_nmi()
2250 desc->istate |= IRQS_NMI; in request_nmi()
2251 retval = irq_nmi_setup(desc); in request_nmi()
2253 __cleanup_nmi(irq, desc); in request_nmi()
2254 raw_spin_unlock_irqrestore(&desc->lock, flags); in request_nmi()
2258 raw_spin_unlock_irqrestore(&desc->lock, flags); in request_nmi()
2263 irq_chip_pm_put(&desc->irq_data); in request_nmi()
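
request_nmi() is far stricter: the checks above reject descriptors that auto-enable, cannot be requested, are per-CPU-devid, or whose chip lacks NMI support (irq_supports_nmi()), and on success it sets IRQS_NMI and runs irq_nmi_setup() under desc->lock. This is arch/infrastructure territory rather than ordinary driver code; a heavily hedged sketch (hypothetical names, and flag requirements vary across kernel versions):

    #include <linux/interrupt.h>
    #include <linux/irq.h>

    static irqreturn_t my_nmi_handler(int irq, void *dev_id)
    {
            /* NMI context: no sleeping, no deferred work from here. */
            return IRQ_HANDLED;
    }

    static int my_setup_nmi(unsigned int irq, void *dev_id)
    {
            /*
             * The line must not auto-enable (hence IRQ_NOAUTOEN) and
             * the chip must advertise NMI delivery, or request_nmi()
             * fails with -EINVAL. Some trees additionally constrain
             * the flags argument; check the version at hand. The IRQ
             * stays disabled until an explicit enable_irq().
             */
            irq_set_status_flags(irq, IRQ_NOAUTOEN);
            return request_nmi(irq, my_nmi_handler, 0, "my-nmi", dev_id);
    }
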
2274 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); in enable_percpu_irq() local
2276 if (!desc) in enable_percpu_irq()
2285 type = irqd_get_trigger_type(&desc->irq_data); in enable_percpu_irq()
2290 ret = __irq_set_trigger(desc, type); in enable_percpu_irq()
2298 irq_percpu_enable(desc, cpu); in enable_percpu_irq()
2300 irq_put_desc_unlock(desc, flags); in enable_percpu_irq()
2319 struct irq_desc *desc; in irq_percpu_is_enabled() local
2323 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); in irq_percpu_is_enabled()
2324 if (!desc) in irq_percpu_is_enabled()
2327 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled); in irq_percpu_is_enabled()
2328 irq_put_desc_unlock(desc, flags); in irq_percpu_is_enabled()
2338 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); in disable_percpu_irq() local
2340 if (!desc) in disable_percpu_irq()
2343 irq_percpu_disable(desc, cpu); in disable_percpu_irq()
2344 irq_put_desc_unlock(desc, flags); in disable_percpu_irq()
2358 struct irq_desc *desc = irq_to_desc(irq); in __free_percpu_irq() local
2364 if (!desc) in __free_percpu_irq()
2367 raw_spin_lock_irqsave(&desc->lock, flags); in __free_percpu_irq()
2369 action = desc->action; in __free_percpu_irq()
2375 if (!cpumask_empty(desc->percpu_enabled)) { in __free_percpu_irq()
2377 irq, cpumask_first(desc->percpu_enabled)); in __free_percpu_irq()
2382 desc->action = NULL; in __free_percpu_irq()
2384 desc->istate &= ~IRQS_NMI; in __free_percpu_irq()
2386 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_percpu_irq()
2390 irq_chip_pm_put(&desc->irq_data); in __free_percpu_irq()
2391 module_put(desc->owner); in __free_percpu_irq()
2395 raw_spin_unlock_irqrestore(&desc->lock, flags); in __free_percpu_irq()
2408 struct irq_desc *desc = irq_to_desc(irq); in remove_percpu_irq() local
2410 if (desc && irq_settings_is_per_cpu_devid(desc)) in remove_percpu_irq()
2428 struct irq_desc *desc = irq_to_desc(irq); in free_percpu_irq() local
2430 if (!desc || !irq_settings_is_per_cpu_devid(desc)) in free_percpu_irq()
2433 chip_bus_lock(desc); in free_percpu_irq()
2435 chip_bus_sync_unlock(desc); in free_percpu_irq()
2441 struct irq_desc *desc = irq_to_desc(irq); in free_percpu_nmi() local
2443 if (!desc || !irq_settings_is_per_cpu_devid(desc)) in free_percpu_nmi()
2446 if (WARN_ON(!(desc->istate & IRQS_NMI))) in free_percpu_nmi()
2461 struct irq_desc *desc = irq_to_desc(irq); in setup_percpu_irq() local
2464 if (!desc || !irq_settings_is_per_cpu_devid(desc)) in setup_percpu_irq()
2467 retval = irq_chip_pm_get(&desc->irq_data); in setup_percpu_irq()
2471 retval = __setup_irq(irq, desc, act); in setup_percpu_irq()
2474 irq_chip_pm_put(&desc->irq_data); in setup_percpu_irq()
2501 struct irq_desc *desc; in __request_percpu_irq() local
2507 desc = irq_to_desc(irq); in __request_percpu_irq()
2508 if (!desc || !irq_settings_can_request(desc) || in __request_percpu_irq()
2509 !irq_settings_is_per_cpu_devid(desc)) in __request_percpu_irq()
2524 retval = irq_chip_pm_get(&desc->irq_data); in __request_percpu_irq()
2530 retval = __setup_irq(irq, desc, action); in __request_percpu_irq()
2533 irq_chip_pm_put(&desc->irq_data); in __request_percpu_irq()
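
The per-CPU variants take a void __percpu dev_id and each CPU enables its own copy of the line; __free_percpu_irq() refuses to tear down while any CPU still has it enabled (the cpumask_empty(desc->percpu_enabled) check above). A sketch using the request_percpu_irq() wrapper, assuming the descriptor was already marked per-CPU-devid by the irqchip code (all names hypothetical):

    #include <linux/interrupt.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned int, my_count);

    static irqreturn_t my_percpu_handler(int irq, void *dev_id)
    {
            unsigned int *count = dev_id;   /* this CPU's instance */

            (*count)++;
            return IRQ_HANDLED;
    }

    static int my_setup_percpu_irq(unsigned int irq)
    {
            int ret;

            ret = request_percpu_irq(irq, my_percpu_handler,
                                     "my-percpu", &my_count);
            if (ret)
                    return ret;

            /*
             * Enables only on the calling CPU; other CPUs do the same,
             * typically from a cpuhp callback. Type 0 (IRQ_TYPE_NONE)
             * keeps the currently configured trigger.
             */
            enable_percpu_irq(irq, 0);
            return 0;
    }
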
2566 struct irq_desc *desc; in request_percpu_nmi() local
2573 desc = irq_to_desc(irq); in request_percpu_nmi()
2575 if (!desc || !irq_settings_can_request(desc) || in request_percpu_nmi()
2576 !irq_settings_is_per_cpu_devid(desc) || in request_percpu_nmi()
2577 irq_settings_can_autoenable(desc) || in request_percpu_nmi()
2578 !irq_supports_nmi(desc)) in request_percpu_nmi()
2582 if (desc->istate & IRQS_NMI) in request_percpu_nmi()
2595 retval = irq_chip_pm_get(&desc->irq_data); in request_percpu_nmi()
2599 retval = __setup_irq(irq, desc, action); in request_percpu_nmi()
2603 raw_spin_lock_irqsave(&desc->lock, flags); in request_percpu_nmi()
2604 desc->istate |= IRQS_NMI; in request_percpu_nmi()
2605 raw_spin_unlock_irqrestore(&desc->lock, flags); in request_percpu_nmi()
2610 irq_chip_pm_put(&desc->irq_data); in request_percpu_nmi()
2633 struct irq_desc *desc; in prepare_percpu_nmi() local
2638 desc = irq_get_desc_lock(irq, &flags, in prepare_percpu_nmi()
2640 if (!desc) in prepare_percpu_nmi()
2643 if (WARN(!(desc->istate & IRQS_NMI), in prepare_percpu_nmi()
2650 ret = irq_nmi_setup(desc); in prepare_percpu_nmi()
2657 irq_put_desc_unlock(desc, flags); in prepare_percpu_nmi()
2676 struct irq_desc *desc; in teardown_percpu_nmi() local
2680 desc = irq_get_desc_lock(irq, &flags, in teardown_percpu_nmi()
2682 if (!desc) in teardown_percpu_nmi()
2685 if (WARN_ON(!(desc->istate & IRQS_NMI))) in teardown_percpu_nmi()
2688 irq_nmi_teardown(desc); in teardown_percpu_nmi()
2690 irq_put_desc_unlock(desc, flags); in teardown_percpu_nmi()
2733 struct irq_desc *desc; in irq_get_irqchip_state() local
2738 desc = irq_get_desc_buslock(irq, &flags, 0); in irq_get_irqchip_state()
2739 if (!desc) in irq_get_irqchip_state()
2742 data = irq_desc_get_irq_data(desc); in irq_get_irqchip_state()
2746 irq_put_desc_busunlock(desc, flags); in irq_get_irqchip_state()
2766 struct irq_desc *desc; in irq_set_irqchip_state() local
2772 desc = irq_get_desc_buslock(irq, &flags, 0); in irq_set_irqchip_state()
2773 if (!desc) in irq_set_irqchip_state()
2776 data = irq_desc_get_irq_data(desc); in irq_set_irqchip_state()
2797 irq_put_desc_busunlock(desc, flags); in irq_set_irqchip_state()
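
Both irqchip state accessors resolve the descriptor under the bus lock and walk the irq_data hierarchy to a chip that implements irq_get/set_irqchip_state(). A common use is manipulating the pending latch, e.g. when forwarding interrupts to a guest or clearing a stale edge before re-enabling; a sketch with a hypothetical helper:

    #include <linux/interrupt.h>
    #include <linux/irq.h>

    /* Clear a stale pending latch before (re)enabling a line. */
    static int my_clear_pending(unsigned int irq)
    {
            bool pending;
            int ret;

            ret = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING,
                                        &pending);
            if (ret)
                    return ret;     /* no irqchip state callback */

            if (pending)
                    ret = irq_set_irqchip_state(irq,
                                                IRQCHIP_STATE_PENDING,
                                                false);
            return ret;
    }
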