1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
4 * Copyright (C) 2005-2006 Thomas Gleixner
5 *
6 * This file contains driver APIs to the irq subsystem.
7 */
8
9 #define pr_fmt(fmt) "genirq: " fmt
10
11 #include <linux/irq.h>
12 #include <linux/kthread.h>
13 #include <linux/module.h>
14 #include <linux/random.h>
15 #include <linux/interrupt.h>
16 #include <linux/irqdomain.h>
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/sched/rt.h>
20 #include <linux/sched/task.h>
21 #include <linux/sched/isolation.h>
22 #include <uapi/linux/sched/types.h>
23 #include <linux/task_work.h>
24
25 #include "internals.h"
26
27 #if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
28 __read_mostly bool force_irqthreads;
29 EXPORT_SYMBOL_GPL(force_irqthreads);
30
31 static int __init setup_forced_irqthreads(char *arg)
32 {
33 force_irqthreads = true;
34 return 0;
35 }
36 early_param("threadirqs", setup_forced_irqthreads);
37 #endif
38
39 static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
40 {
41 struct irq_data *irqd = irq_desc_get_irq_data(desc);
42 bool inprogress;
43
44 do {
45 unsigned long flags;
46
47 /*
48 * Wait until we're out of the critical section. This might
49 * give the wrong answer due to the lack of memory barriers.
50 */
51 while (irqd_irq_inprogress(&desc->irq_data))
52 cpu_relax();
53
54 /* Ok, that indicated we're done: double-check carefully. */
55 raw_spin_lock_irqsave(&desc->lock, flags);
56 inprogress = irqd_irq_inprogress(&desc->irq_data);
57
58 /*
59 * If requested and supported, check at the chip whether it
60 * is in flight at the hardware level, i.e. already pending
61 * in a CPU and waiting for service and acknowledge.
62 */
63 if (!inprogress && sync_chip) {
64 /*
65 * Ignore the return code. inprogress is only updated
66 * when the chip supports it.
67 */
68 __irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
69 &inprogress);
70 }
71 raw_spin_unlock_irqrestore(&desc->lock, flags);
72
73 /* Oops, that failed? */
74 } while (inprogress);
75 }
76
77 /**
78 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
79 * @irq: interrupt number to wait for
80 *
81 * This function waits for any pending hard IRQ handlers for this
82 * interrupt to complete before returning. If you use this
83 * function while holding a resource the IRQ handler may need you
84 * will deadlock. It does not take associated threaded handlers
85 * into account.
86 *
87 * Do not use this for shutdown scenarios where you must be sure
88 * that all parts (hardirq and threaded handler) have completed.
89 *
90 * Returns: false if a threaded handler is active.
91 *
92 * This function may be called - with care - from IRQ context.
93 *
94 * It does not check whether there is an interrupt in flight at the
95 * hardware level, but not serviced yet, as this might deadlock when
96 * called with interrupts disabled and the target CPU of the interrupt
97 * is the current CPU.
98 */
99 bool synchronize_hardirq(unsigned int irq)
100 {
101 struct irq_desc *desc = irq_to_desc(irq);
102
103 if (desc) {
104 __synchronize_hardirq(desc, false);
105 return !atomic_read(&desc->threads_active);
106 }
107
108 return true;
109 }
110 EXPORT_SYMBOL(synchronize_hardirq);
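
/*
 * Illustrative sketch, not part of this file: a driver that has masked
 * its device-level interrupt source from atomic context can use
 * synchronize_hardirq() to make sure the hard irq handler is no longer
 * running on another CPU before touching shared state. All foo_* names
 * are hypothetical.
 *
 *	foo_mask_device_irq(fd);
 *	if (!synchronize_hardirq(fd->irq))
 *		pr_debug("foo: irq thread still active, deferring cleanup\n");
 *
 * This only covers the hard handler; use synchronize_irq() when the
 * threaded handler must be quiesced as well.
 */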
111
112 /**
113 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
114 * @irq: interrupt number to wait for
115 *
116 * This function waits for any pending IRQ handlers for this interrupt
117 * to complete before returning. If you use this function while
118 * holding a resource the IRQ handler may need you will deadlock.
119 *
120 * Can only be called from preemptible code as it might sleep when
121 * an interrupt thread is associated to @irq.
122 *
123 * It optionally makes sure (when the irq chip supports that method)
124 * that the interrupt is not pending in any CPU and waiting for
125 * service.
126 */
127 void synchronize_irq(unsigned int irq)
128 {
129 struct irq_desc *desc = irq_to_desc(irq);
130
131 if (desc) {
132 __synchronize_hardirq(desc, true);
133 /*
134 * We made sure that no hardirq handler is
135 * running. Now verify that no threaded handlers are
136 * active.
137 */
138 wait_event(desc->wait_for_threads,
139 !atomic_read(&desc->threads_active));
140 }
141 }
142 EXPORT_SYMBOL(synchronize_irq);
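
/*
 * Illustrative sketch, not part of this file: the usual quiescing
 * sequence in a hypothetical driver teardown path. It must run in
 * preemptible context because synchronize_irq() may sleep waiting for
 * the irq thread.
 *
 *	static void foo_quiesce(struct foo_dev *fd)
 *	{
 *		disable_irq_nosync(fd->irq);
 *		synchronize_irq(fd->irq);
 *		foo_release_buffers(fd);
 *	}
 *
 * All foo_* identifiers are made up for this example.
 */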
143
144 #ifdef CONFIG_SMP
145 cpumask_var_t irq_default_affinity;
146
147 static bool __irq_can_set_affinity(struct irq_desc *desc)
148 {
149 if (!desc || !irqd_can_balance(&desc->irq_data) ||
150 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
151 return false;
152 return true;
153 }
154
155 /**
156 * irq_can_set_affinity - Check if the affinity of a given irq can be set
157 * @irq: Interrupt to check
158 *
159 */
160 int irq_can_set_affinity(unsigned int irq)
161 {
162 return __irq_can_set_affinity(irq_to_desc(irq));
163 }
164
165 /**
166 * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
167 * @irq: Interrupt to check
168 *
169 * Like irq_can_set_affinity() above, but additionally checks for the
170 * AFFINITY_MANAGED flag.
171 */
172 bool irq_can_set_affinity_usr(unsigned int irq)
173 {
174 struct irq_desc *desc = irq_to_desc(irq);
175
176 return __irq_can_set_affinity(desc) &&
177 !irqd_affinity_is_managed(&desc->irq_data);
178 }
179
180 /**
181 * irq_set_thread_affinity - Notify irq threads to adjust affinity
182 * @desc: irq descriptor which has affinity changed
183 *
184 * We just set IRQTF_AFFINITY and delegate the affinity setting
185 * to the interrupt thread itself. We can not call
186 * set_cpus_allowed_ptr() here as we hold desc->lock and this
187 * code can be called from hard interrupt context.
188 */
189 void irq_set_thread_affinity(struct irq_desc *desc)
190 {
191 struct irqaction *action;
192
193 for_each_action_of_desc(desc, action)
194 if (action->thread)
195 set_bit(IRQTF_AFFINITY, &action->thread_flags);
196 }
197
198 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
199 static void irq_validate_effective_affinity(struct irq_data *data)
200 {
201 const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
202 struct irq_chip *chip = irq_data_get_irq_chip(data);
203
204 if (!cpumask_empty(m))
205 return;
206 pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
207 chip->name, data->irq);
208 }
209
210 static inline void irq_init_effective_affinity(struct irq_data *data,
211 const struct cpumask *mask)
212 {
213 cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
214 }
215 #else
216 static inline void irq_validate_effective_affinity(struct irq_data *data) { }
217 static inline void irq_init_effective_affinity(struct irq_data *data,
218 const struct cpumask *mask) { }
219 #endif
220
221 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
222 bool force)
223 {
224 struct irq_desc *desc = irq_data_to_desc(data);
225 struct irq_chip *chip = irq_data_get_irq_chip(data);
226 const struct cpumask *prog_mask;
227 int ret;
228
229 static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
230 static struct cpumask tmp_mask;
231
232 if (!chip || !chip->irq_set_affinity)
233 return -EINVAL;
234
235 raw_spin_lock(&tmp_mask_lock);
236 /*
237 * If this is a managed interrupt and housekeeping is enabled on
238 * it check whether the requested affinity mask intersects with
239 * a housekeeping CPU. If so, then remove the isolated CPUs from
240 * the mask and just keep the housekeeping CPU(s). This prevents
241 * the affinity setter from routing the interrupt to an isolated
242 * CPU to avoid that I/O submitted from a housekeeping CPU causes
243 * interrupts on an isolated one.
244 *
245 * If the masks do not intersect or include online CPU(s) then
246 * keep the requested mask. The isolated target CPUs are only
247 * receiving interrupts when the I/O operation was submitted
248 * directly from them.
249 *
250 * If all housekeeping CPUs in the affinity mask are offline, the
251 * interrupt will be migrated by the CPU hotplug code once a
252 * housekeeping CPU which belongs to the affinity mask comes
253 * online.
254 */
255 if (irqd_affinity_is_managed(data) &&
256 housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) {
257 const struct cpumask *hk_mask;
258
259 hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);
260
261 cpumask_and(&tmp_mask, mask, hk_mask);
262 if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
263 prog_mask = mask;
264 else
265 prog_mask = &tmp_mask;
266 } else {
267 prog_mask = mask;
268 }
269
270 /*
271 * Make sure we only provide online CPUs to the irqchip,
272 * unless we are being asked to force the affinity (in which
273 * case we do as we are told).
274 */
275 cpumask_and(&tmp_mask, prog_mask, cpu_online_mask);
276 if (!force && !cpumask_empty(&tmp_mask))
277 ret = chip->irq_set_affinity(data, &tmp_mask, force);
278 else if (force)
279 ret = chip->irq_set_affinity(data, mask, force);
280 else
281 ret = -EINVAL;
282
283 raw_spin_unlock(&tmp_mask_lock);
284
285 switch (ret) {
286 case IRQ_SET_MASK_OK:
287 case IRQ_SET_MASK_OK_DONE:
288 cpumask_copy(desc->irq_common_data.affinity, mask);
289 fallthrough;
290 case IRQ_SET_MASK_OK_NOCOPY:
291 irq_validate_effective_affinity(data);
292 irq_set_thread_affinity(desc);
293 ret = 0;
294 }
295
296 return ret;
297 }
298 EXPORT_SYMBOL_GPL(irq_do_set_affinity);
299
300 #ifdef CONFIG_GENERIC_PENDING_IRQ
301 static inline int irq_set_affinity_pending(struct irq_data *data,
302 const struct cpumask *dest)
303 {
304 struct irq_desc *desc = irq_data_to_desc(data);
305
306 irqd_set_move_pending(data);
307 irq_copy_pending(desc, dest);
308 return 0;
309 }
310 #else
311 static inline int irq_set_affinity_pending(struct irq_data *data,
312 const struct cpumask *dest)
313 {
314 return -EBUSY;
315 }
316 #endif
317
318 static int irq_try_set_affinity(struct irq_data *data,
319 const struct cpumask *dest, bool force)
320 {
321 int ret = irq_do_set_affinity(data, dest, force);
322
323 /*
324 * In case that the underlying vector management is busy and the
325 * architecture supports the generic pending mechanism then utilize
326 * this to avoid returning an error to user space.
327 */
328 if (ret == -EBUSY && !force)
329 ret = irq_set_affinity_pending(data, dest);
330 return ret;
331 }
332
333 static bool irq_set_affinity_deactivated(struct irq_data *data,
334 const struct cpumask *mask, bool force)
335 {
336 struct irq_desc *desc = irq_data_to_desc(data);
337
338 /*
339 * Handle irq chips which can handle affinity only in activated
340 * state correctly
341 *
342 * If the interrupt is not yet activated, just store the affinity
343 * mask and do not call the chip driver at all. On activation the
344 * driver has to make sure anyway that the interrupt is in a
345 * usable state so startup works.
346 */
347 if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
348 irqd_is_activated(data) || !irqd_affinity_on_activate(data))
349 return false;
350
351 cpumask_copy(desc->irq_common_data.affinity, mask);
352 irq_init_effective_affinity(data, mask);
353 irqd_set(data, IRQD_AFFINITY_SET);
354 return true;
355 }
356
357 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
358 bool force)
359 {
360 struct irq_chip *chip = irq_data_get_irq_chip(data);
361 struct irq_desc *desc = irq_data_to_desc(data);
362 int ret = 0;
363
364 if (!chip || !chip->irq_set_affinity)
365 return -EINVAL;
366
367 if (irq_set_affinity_deactivated(data, mask, force))
368 return 0;
369
370 if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
371 ret = irq_try_set_affinity(data, mask, force);
372 } else {
373 irqd_set_move_pending(data);
374 irq_copy_pending(desc, mask);
375 }
376
377 if (desc->affinity_notify) {
378 kref_get(&desc->affinity_notify->kref);
379 if (!schedule_work(&desc->affinity_notify->work)) {
380 /* Work was already scheduled, drop our extra ref */
381 kref_put(&desc->affinity_notify->kref,
382 desc->affinity_notify->release);
383 }
384 }
385 irqd_set(data, IRQD_AFFINITY_SET);
386
387 return ret;
388 }
389
390 int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
391 {
392 struct irq_desc *desc = irq_to_desc(irq);
393 unsigned long flags;
394 int ret;
395
396 if (!desc)
397 return -EINVAL;
398
399 raw_spin_lock_irqsave(&desc->lock, flags);
400 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
401 raw_spin_unlock_irqrestore(&desc->lock, flags);
402 return ret;
403 }
404
405 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
406 {
407 unsigned long flags;
408 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
409
410 if (!desc)
411 return -EINVAL;
412 desc->affinity_hint = m;
413 irq_put_desc_unlock(desc, flags);
414 /* set the initial affinity to prevent every interrupt being on CPU0 */
415 if (m)
416 __irq_set_affinity(irq, m, false);
417 return 0;
418 }
419 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
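
/*
 * Illustrative sketch, not part of this file: a multi-queue driver
 * spreading its per-queue vectors and publishing the preferred CPU to
 * user space tools such as irqbalance. Names are hypothetical.
 *
 *	for (i = 0; i < fd->nr_queues; i++)
 *		irq_set_affinity_hint(fd->queue_irq[i], cpumask_of(i % nr_cpu_ids));
 *
 * The hint has to be cleared with irq_set_affinity_hint(irq, NULL)
 * before the interrupt is freed; __free_irq() warns if it is still set.
 */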
420
421 static void irq_affinity_notify(struct work_struct *work)
422 {
423 struct irq_affinity_notify *notify =
424 container_of(work, struct irq_affinity_notify, work);
425 struct irq_desc *desc = irq_to_desc(notify->irq);
426 cpumask_var_t cpumask;
427 unsigned long flags;
428
429 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
430 goto out;
431
432 raw_spin_lock_irqsave(&desc->lock, flags);
433 if (irq_move_pending(&desc->irq_data))
434 irq_get_pending(cpumask, desc);
435 else
436 cpumask_copy(cpumask, desc->irq_common_data.affinity);
437 raw_spin_unlock_irqrestore(&desc->lock, flags);
438
439 notify->notify(notify, cpumask);
440
441 free_cpumask_var(cpumask);
442 out:
443 kref_put(&notify->kref, notify->release);
444 }
445
446 /**
447 * irq_set_affinity_notifier - control notification of IRQ affinity changes
448 * @irq: Interrupt for which to enable/disable notification
449 * @notify: Context for notification, or %NULL to disable
450 * notification. Function pointers must be initialised;
451 * the other fields will be initialised by this function.
452 *
453 * Must be called in process context. Notification may only be enabled
454 * after the IRQ is allocated and must be disabled before the IRQ is
455 * freed using free_irq().
456 */
457 int
458 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
459 {
460 struct irq_desc *desc = irq_to_desc(irq);
461 struct irq_affinity_notify *old_notify;
462 unsigned long flags;
463
464 /* The release function is promised process context */
465 might_sleep();
466
467 if (!desc || desc->istate & IRQS_NMI)
468 return -EINVAL;
469
470 /* Complete initialisation of *notify */
471 if (notify) {
472 notify->irq = irq;
473 kref_init(&notify->kref);
474 INIT_WORK(&notify->work, irq_affinity_notify);
475 }
476
477 raw_spin_lock_irqsave(&desc->lock, flags);
478 old_notify = desc->affinity_notify;
479 desc->affinity_notify = notify;
480 raw_spin_unlock_irqrestore(&desc->lock, flags);
481
482 if (old_notify) {
483 if (cancel_work_sync(&old_notify->work)) {
484 /* Pending work had a ref, put that one too */
485 kref_put(&old_notify->kref, old_notify->release);
486 }
487 kref_put(&old_notify->kref, old_notify->release);
488 }
489
490 return 0;
491 }
492 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
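
/*
 * Illustrative sketch, not part of this file: registering an affinity
 * notifier so a driver can retune per-CPU resources when user space
 * moves the interrupt. The foo_* names are hypothetical.
 *
 *	static void foo_affinity_changed(struct irq_affinity_notify *notify,
 *					 const cpumask_t *mask)
 *	{
 *		struct foo_dev *fd = container_of(notify, struct foo_dev,
 *						  affinity_notify);
 *
 *		foo_retarget_queues(fd, mask);
 *	}
 *
 *	static void foo_affinity_release(struct kref *ref) { }
 *
 *	fd->affinity_notify.notify = foo_affinity_changed;
 *	fd->affinity_notify.release = foo_affinity_release;
 *	irq_set_affinity_notifier(fd->irq, &fd->affinity_notify);
 *
 * Pass a NULL notifier before free_irq() to disable notification again.
 */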
493
494 #ifndef CONFIG_AUTO_IRQ_AFFINITY
495 /*
496 * Generic version of the affinity autoselector.
497 */
498 int irq_setup_affinity(struct irq_desc *desc)
499 {
500 struct cpumask *set = irq_default_affinity;
501 int ret, node = irq_desc_get_node(desc);
502 static DEFINE_RAW_SPINLOCK(mask_lock);
503 static struct cpumask mask;
504
505 /* Excludes PER_CPU and NO_BALANCE interrupts */
506 if (!__irq_can_set_affinity(desc))
507 return 0;
508
509 raw_spin_lock(&mask_lock);
510 /*
511 * Preserve the managed affinity setting and a userspace affinity
512 * setup, but make sure that one of the targets is online.
513 */
514 if (irqd_affinity_is_managed(&desc->irq_data) ||
515 irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
516 if (cpumask_intersects(desc->irq_common_data.affinity,
517 cpu_online_mask))
518 set = desc->irq_common_data.affinity;
519 else
520 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
521 }
522
523 cpumask_and(&mask, cpu_online_mask, set);
524 if (cpumask_empty(&mask))
525 cpumask_copy(&mask, cpu_online_mask);
526
527 if (node != NUMA_NO_NODE) {
528 const struct cpumask *nodemask = cpumask_of_node(node);
529
530 /* make sure at least one of the cpus in nodemask is online */
531 if (cpumask_intersects(&mask, nodemask))
532 cpumask_and(&mask, &mask, nodemask);
533 }
534 ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
535 raw_spin_unlock(&mask_lock);
536 return ret;
537 }
538 #else
539 /* Wrapper for ALPHA specific affinity selector magic */
540 int irq_setup_affinity(struct irq_desc *desc)
541 {
542 return irq_select_affinity(irq_desc_get_irq(desc));
543 }
544 #endif /* CONFIG_AUTO_IRQ_AFFINITY */
545 #endif /* CONFIG_SMP */
546
547
548 /**
549 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
550 * @irq: interrupt number to set affinity
551 * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
552 * specific data for percpu_devid interrupts
553 *
554 * This function uses the vCPU specific data to set the vCPU
555 * affinity for an irq. The vCPU specific data is passed from
556 * outside, such as KVM. One example code path is as below:
557 * KVM -> IOMMU -> irq_set_vcpu_affinity().
558 */
559 int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
560 {
561 unsigned long flags;
562 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
563 struct irq_data *data;
564 struct irq_chip *chip;
565 int ret = -ENOSYS;
566
567 if (!desc)
568 return -EINVAL;
569
570 data = irq_desc_get_irq_data(desc);
571 do {
572 chip = irq_data_get_irq_chip(data);
573 if (chip && chip->irq_set_vcpu_affinity)
574 break;
575 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
576 data = data->parent_data;
577 #else
578 data = NULL;
579 #endif
580 } while (data);
581
582 if (data)
583 ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
584 irq_put_desc_unlock(desc, flags);
585
586 return ret;
587 }
588 EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
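
/*
 * Illustrative sketch, not part of this file: virtualization glue simply
 * forwards the opaque, architecture-specific vCPU data; this code never
 * interprets it. host_irq and vcpu_info are placeholders for whatever
 * the virt layer (e.g. KVM via the IOMMU driver) hands down.
 *
 *	ret = irq_set_vcpu_affinity(host_irq, vcpu_info);
 *	if (ret)
 *		pr_debug("falling back to non-posted interrupt delivery\n");
 */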
589
590 void __disable_irq(struct irq_desc *desc)
591 {
592 if (!desc->depth++)
593 irq_disable(desc);
594 }
595
596 static int __disable_irq_nosync(unsigned int irq)
597 {
598 unsigned long flags;
599 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
600
601 if (!desc)
602 return -EINVAL;
603 __disable_irq(desc);
604 irq_put_desc_busunlock(desc, flags);
605 return 0;
606 }
607
608 /**
609 * disable_irq_nosync - disable an irq without waiting
610 * @irq: Interrupt to disable
611 *
612 * Disable the selected interrupt line. Disables and Enables are
613 * nested.
614 * Unlike disable_irq(), this function does not ensure existing
615 * instances of the IRQ handler have completed before returning.
616 *
617 * This function may be called from IRQ context.
618 */
619 void disable_irq_nosync(unsigned int irq)
620 {
621 __disable_irq_nosync(irq);
622 }
623 EXPORT_SYMBOL(disable_irq_nosync);
624
625 /**
626 * disable_irq - disable an irq and wait for completion
627 * @irq: Interrupt to disable
628 *
629 * Disable the selected interrupt line. Enables and Disables are
630 * nested.
631 * This function waits for any pending IRQ handlers for this interrupt
632 * to complete before returning. If you use this function while
633 * holding a resource the IRQ handler may need you will deadlock.
634 *
635 * This function may be called - with care - from IRQ context.
636 */
637 void disable_irq(unsigned int irq)
638 {
639 if (!__disable_irq_nosync(irq))
640 synchronize_irq(irq);
641 }
642 EXPORT_SYMBOL(disable_irq);
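
/*
 * Illustrative sketch, not part of this file: because disable_irq()
 * waits for running handlers, calling it while holding a lock which the
 * handler also takes will deadlock. A safe ordering in a hypothetical
 * driver:
 *
 *	disable_irq(fd->irq);
 *	spin_lock(&fd->lock);
 *	foo_reprogram_hw(fd);
 *	spin_unlock(&fd->lock);
 *	enable_irq(fd->irq);
 *
 * The disable has to happen before taking any lock the handler itself
 * acquires.
 */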
643
644 /**
645 * disable_hardirq - disables an irq and waits for hardirq completion
646 * @irq: Interrupt to disable
647 *
648 * Disable the selected interrupt line. Enables and Disables are
649 * nested.
650 * This function waits for any pending hard IRQ handlers for this
651 * interrupt to complete before returning. If you use this function while
652 * holding a resource the hard IRQ handler may need you will deadlock.
653 *
654 * When used to optimistically disable an interrupt from atomic context
655 * the return value must be checked.
656 *
657 * Returns: false if a threaded handler is active.
658 *
659 * This function may be called - with care - from IRQ context.
660 */
661 bool disable_hardirq(unsigned int irq)
662 {
663 if (!__disable_irq_nosync(irq))
664 return synchronize_hardirq(irq);
665
666 return false;
667 }
668 EXPORT_SYMBOL_GPL(disable_hardirq);
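
/*
 * Illustrative sketch, not part of this file: optimistic disable from
 * atomic context, e.g. a polling path. The return value tells whether
 * the line is fully quiet or a threaded handler is still running.
 *
 *	if (disable_hardirq(fd->irq))
 *		foo_poll_device(fd);
 *	enable_irq(fd->irq);
 *
 * foo_* names are hypothetical; enable_irq() undoes the disable in
 * either case.
 */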
669
670 /**
671 * disable_nmi_nosync - disable an nmi without waiting
672 * @irq: Interrupt to disable
673 *
674 * Disable the selected interrupt line. Disables and enables are
675 * nested.
676 * The interrupt to disable must have been requested through request_nmi.
677 * Unlike disable_nmi(), this function does not ensure existing
678 * instances of the IRQ handler have completed before returning.
679 */
680 void disable_nmi_nosync(unsigned int irq)
681 {
682 disable_irq_nosync(irq);
683 }
684
685 void __enable_irq(struct irq_desc *desc)
686 {
687 switch (desc->depth) {
688 case 0:
689 err_out:
690 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
691 irq_desc_get_irq(desc));
692 break;
693 case 1: {
694 if (desc->istate & IRQS_SUSPENDED)
695 goto err_out;
696 /* Prevent probing on this irq: */
697 irq_settings_set_noprobe(desc);
698 /*
699 * Call irq_startup() not irq_enable() here because the
700 * interrupt might be marked NOAUTOEN. So irq_startup()
701 * needs to be invoked when it gets enabled the first
702 * time. If it was already started up, then irq_startup()
703 * will invoke irq_enable() under the hood.
704 */
705 irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
706 break;
707 }
708 default:
709 desc->depth--;
710 }
711 }
712
713 /**
714 * enable_irq - enable handling of an irq
715 * @irq: Interrupt to enable
716 *
717 * Undoes the effect of one call to disable_irq(). If this
718 * matches the last disable, processing of interrupts on this
719 * IRQ line is re-enabled.
720 *
721 * This function may be called from IRQ context only when
722 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
723 */
724 void enable_irq(unsigned int irq)
725 {
726 unsigned long flags;
727 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
728
729 if (!desc)
730 return;
731 if (WARN(!desc->irq_data.chip,
732 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
733 goto out;
734
735 __enable_irq(desc);
736 out:
737 irq_put_desc_busunlock(desc, flags);
738 }
739 EXPORT_SYMBOL(enable_irq);
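
/*
 * Illustrative sketch, not part of this file: disables nest, so every
 * disable_irq*() call needs a matching enable_irq().
 *
 *	disable_irq(fd->irq);		depth 0 -> 1, line gets disabled
 *	disable_irq(fd->irq);		depth 1 -> 2
 *	enable_irq(fd->irq);		depth 2 -> 1, still disabled
 *	enable_irq(fd->irq);		depth 1 -> 0, line enabled again
 *
 * One more enable_irq() would trigger the "Unbalanced enable" warning
 * in __enable_irq() above.
 */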
740
741 /**
742 * enable_nmi - enable handling of an nmi
743 * @irq: Interrupt to enable
744 *
745 * The interrupt to enable must have been requested through request_nmi.
746 * Undoes the effect of one call to disable_nmi(). If this
747 * matches the last disable, processing of interrupts on this
748 * IRQ line is re-enabled.
749 */
750 void enable_nmi(unsigned int irq)
751 {
752 enable_irq(irq);
753 }
754
755 static int set_irq_wake_real(unsigned int irq, unsigned int on)
756 {
757 struct irq_desc *desc = irq_to_desc(irq);
758 int ret = -ENXIO;
759
760 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
761 return 0;
762
763 if (desc->irq_data.chip->irq_set_wake)
764 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
765
766 return ret;
767 }
768
769 /**
770 * irq_set_irq_wake - control irq power management wakeup
771 * @irq: interrupt to control
772 * @on: enable/disable power management wakeup
773 *
774 * Enable/disable power management wakeup mode, which is
775 * disabled by default. Enables and disables must match,
776 * just as they match for non-wakeup mode support.
777 *
778 * Wakeup mode lets this IRQ wake the system from sleep
779 * states like "suspend to RAM".
780 *
781 * Note: irq enable/disable state is completely orthogonal
782 * to the enable/disable state of irq wake. An irq can be
783 * disabled with disable_irq() and still wake the system as
784 * long as the irq has wake enabled. If this does not hold,
785 * then the underlying irq chip and the related driver need
786 * to be investigated.
787 */
788 int irq_set_irq_wake(unsigned int irq, unsigned int on)
789 {
790 unsigned long flags;
791 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
792 int ret = 0;
793
794 if (!desc)
795 return -EINVAL;
796
797 /* Don't use NMIs as wake up interrupts please */
798 if (desc->istate & IRQS_NMI) {
799 ret = -EINVAL;
800 goto out_unlock;
801 }
802
803 /* wakeup-capable irqs can be shared between drivers that
804 * don't need to have the same sleep mode behaviors.
805 */
806 if (on) {
807 if (desc->wake_depth++ == 0) {
808 ret = set_irq_wake_real(irq, on);
809 if (ret)
810 desc->wake_depth = 0;
811 else
812 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
813 }
814 } else {
815 if (desc->wake_depth == 0) {
816 WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
817 } else if (--desc->wake_depth == 0) {
818 ret = set_irq_wake_real(irq, on);
819 if (ret)
820 desc->wake_depth = 1;
821 else
822 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
823 }
824 }
825
826 out_unlock:
827 irq_put_desc_busunlock(desc, flags);
828 return ret;
829 }
830 EXPORT_SYMBOL(irq_set_irq_wake);
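
/*
 * Illustrative sketch, not part of this file: arming an interrupt as a
 * wakeup source across suspend. enable_irq_wake()/disable_irq_wake()
 * are thin wrappers around irq_set_irq_wake(). foo_* names are
 * hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *fd = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(fd->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_dev *fd = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(fd->irq);
 *		return 0;
 *	}
 */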
831
832 /*
833 * Internal function that tells the architecture code whether a
834 * particular irq has been exclusively allocated or is available
835 * for driver use.
836 */
837 int can_request_irq(unsigned int irq, unsigned long irqflags)
838 {
839 unsigned long flags;
840 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
841 int canrequest = 0;
842
843 if (!desc)
844 return 0;
845
846 if (irq_settings_can_request(desc)) {
847 if (!desc->action ||
848 irqflags & desc->action->flags & IRQF_SHARED)
849 canrequest = 1;
850 }
851 irq_put_desc_unlock(desc, flags);
852 return canrequest;
853 }
854
855 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
856 {
857 struct irq_chip *chip = desc->irq_data.chip;
858 int ret, unmask = 0;
859
860 if (!chip || !chip->irq_set_type) {
861 /*
862 * IRQF_TRIGGER_* but the PIC does not support multiple
863 * flow-types?
864 */
865 pr_debug("No set_type function for IRQ %d (%s)\n",
866 irq_desc_get_irq(desc),
867 chip ? (chip->name ? : "unknown") : "unknown");
868 return 0;
869 }
870
871 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
872 if (!irqd_irq_masked(&desc->irq_data))
873 mask_irq(desc);
874 if (!irqd_irq_disabled(&desc->irq_data))
875 unmask = 1;
876 }
877
878 /* Mask all flags except trigger mode */
879 flags &= IRQ_TYPE_SENSE_MASK;
880 ret = chip->irq_set_type(&desc->irq_data, flags);
881
882 switch (ret) {
883 case IRQ_SET_MASK_OK:
884 case IRQ_SET_MASK_OK_DONE:
885 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
886 irqd_set(&desc->irq_data, flags);
887 fallthrough;
888
889 case IRQ_SET_MASK_OK_NOCOPY:
890 flags = irqd_get_trigger_type(&desc->irq_data);
891 irq_settings_set_trigger_mask(desc, flags);
892 irqd_clear(&desc->irq_data, IRQD_LEVEL);
893 irq_settings_clr_level(desc);
894 if (flags & IRQ_TYPE_LEVEL_MASK) {
895 irq_settings_set_level(desc);
896 irqd_set(&desc->irq_data, IRQD_LEVEL);
897 }
898
899 ret = 0;
900 break;
901 default:
902 pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
903 flags, irq_desc_get_irq(desc), chip->irq_set_type);
904 }
905 if (unmask)
906 unmask_irq(desc);
907 return ret;
908 }
909
910 #ifdef CONFIG_HARDIRQS_SW_RESEND
911 int irq_set_parent(int irq, int parent_irq)
912 {
913 unsigned long flags;
914 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
915
916 if (!desc)
917 return -EINVAL;
918
919 desc->parent_irq = parent_irq;
920
921 irq_put_desc_unlock(desc, flags);
922 return 0;
923 }
924 EXPORT_SYMBOL_GPL(irq_set_parent);
925 #endif
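
/*
 * Illustrative sketch, not part of this file: a demultiplexing interrupt
 * controller driver records the parent line of its child interrupts so
 * the software resend logic can retrigger via the parent. Names are
 * hypothetical.
 *
 *	for (i = 0; i < fc->nr_child_irqs; i++)
 *		irq_set_parent(fc->first_child_irq + i, fc->parent_irq);
 */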
926
927 /*
928 * Default primary interrupt handler for threaded interrupts. Is
929 * assigned as primary handler when request_threaded_irq is called
930 * with handler == NULL. Useful for oneshot interrupts.
931 */
932 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
933 {
934 return IRQ_WAKE_THREAD;
935 }
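
/*
 * Illustrative sketch, not part of this file: requesting a purely
 * threaded interrupt. With handler == NULL the default primary handler
 * above is installed, and IRQF_ONESHOT is required unless the irqchip
 * is flagged IRQCHIP_ONESHOT_SAFE. Names are hypothetical.
 *
 *	ret = request_threaded_irq(fd->irq, NULL, foo_thread_fn,
 *				   IRQF_ONESHOT, "foo", fd);
 */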
936
937 /*
938 * Primary handler for nested threaded interrupts. Should never be
939 * called.
940 */
941 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
942 {
943 WARN(1, "Primary handler called for nested irq %d\n", irq);
944 return IRQ_NONE;
945 }
946
947 static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
948 {
949 WARN(1, "Secondary action handler called for irq %d\n", irq);
950 return IRQ_NONE;
951 }
952
953 static int irq_wait_for_interrupt(struct irqaction *action)
954 {
955 for (;;) {
956 set_current_state(TASK_INTERRUPTIBLE);
957
958 if (kthread_should_stop()) {
959 /* may need to run one last time */
960 if (test_and_clear_bit(IRQTF_RUNTHREAD,
961 &action->thread_flags)) {
962 __set_current_state(TASK_RUNNING);
963 return 0;
964 }
965 __set_current_state(TASK_RUNNING);
966 return -1;
967 }
968
969 if (test_and_clear_bit(IRQTF_RUNTHREAD,
970 &action->thread_flags)) {
971 __set_current_state(TASK_RUNNING);
972 return 0;
973 }
974 schedule();
975 }
976 }
977
978 /*
979 * Oneshot interrupts keep the irq line masked until the threaded
980 * handler has finished. Unmask if the interrupt has not been disabled and
981 * is marked MASKED.
982 */
983 static void irq_finalize_oneshot(struct irq_desc *desc,
984 struct irqaction *action)
985 {
986 if (!(desc->istate & IRQS_ONESHOT) ||
987 action->handler == irq_forced_secondary_handler)
988 return;
989 again:
990 chip_bus_lock(desc);
991 raw_spin_lock_irq(&desc->lock);
992
993 /*
994 * Implausible though it may be, we need to protect ourselves against
995 * the following scenario:
996 *
997 * The thread is faster done than the hard interrupt handler
998 * on the other CPU. If we unmask the irq line then the
999 * interrupt can come in again and masks the line, leaves due
1000 * to IRQS_INPROGRESS and the irq line is masked forever.
1001 *
1002 * This also serializes the state of shared oneshot handlers
1003 * versus "desc->threads_oneshot |= action->thread_mask;" in
1004 * irq_wake_thread(). See the comment there which explains the
1005 * serialization.
1006 */
1007 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
1008 raw_spin_unlock_irq(&desc->lock);
1009 chip_bus_sync_unlock(desc);
1010 cpu_relax();
1011 goto again;
1012 }
1013
1014 /*
1015 * Now check again, whether the thread should run. Otherwise
1016 * we would clear the threads_oneshot bit of this thread which
1017 * was just set.
1018 */
1019 if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1020 goto out_unlock;
1021
1022 desc->threads_oneshot &= ~action->thread_mask;
1023
1024 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
1025 irqd_irq_masked(&desc->irq_data))
1026 unmask_threaded_irq(desc);
1027
1028 out_unlock:
1029 raw_spin_unlock_irq(&desc->lock);
1030 chip_bus_sync_unlock(desc);
1031 }
1032
1033 #ifdef CONFIG_SMP
1034 /*
1035 * Check whether we need to change the affinity of the interrupt thread.
1036 */
1037 static void
1038 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
1039 {
1040 cpumask_var_t mask;
1041 bool valid = true;
1042
1043 if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
1044 return;
1045
1046 /*
1047 * In case we are out of memory we set IRQTF_AFFINITY again and
1048 * try again next time
1049 */
1050 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
1051 set_bit(IRQTF_AFFINITY, &action->thread_flags);
1052 return;
1053 }
1054
1055 raw_spin_lock_irq(&desc->lock);
1056 /*
1057 * This code is triggered unconditionally. Check the affinity
1058 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
1059 */
1060 if (cpumask_available(desc->irq_common_data.affinity)) {
1061 const struct cpumask *m;
1062
1063 m = irq_data_get_effective_affinity_mask(&desc->irq_data);
1064 cpumask_copy(mask, m);
1065 } else {
1066 valid = false;
1067 }
1068 raw_spin_unlock_irq(&desc->lock);
1069
1070 if (valid)
1071 set_cpus_allowed_ptr(current, mask);
1072 free_cpumask_var(mask);
1073 }
1074 #else
1075 static inline void
1076 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
1077 #endif
1078
1079 /*
1080 * Interrupts which are not explicitly requested as threaded
1081 * interrupts rely on the implicit bh/preempt disable of the hard irq
1082 * context. So we need to disable bh here to avoid deadlocks and other
1083 * side effects.
1084 */
1085 static irqreturn_t
1086 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
1087 {
1088 irqreturn_t ret;
1089
1090 local_bh_disable();
1091 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
1092 local_irq_disable();
1093 ret = action->thread_fn(action->irq, action->dev_id);
1094 if (ret == IRQ_HANDLED)
1095 atomic_inc(&desc->threads_handled);
1096
1097 irq_finalize_oneshot(desc, action);
1098 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
1099 local_irq_enable();
1100 local_bh_enable();
1101 return ret;
1102 }
1103
1104 /*
1105 * Interrupts explicitly requested as threaded interrupts want to be
1106 * preemptible - many of them need to sleep and wait for slow buses to
1107 * complete.
1108 */
1109 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
1110 struct irqaction *action)
1111 {
1112 irqreturn_t ret;
1113
1114 ret = action->thread_fn(action->irq, action->dev_id);
1115 if (ret == IRQ_HANDLED)
1116 atomic_inc(&desc->threads_handled);
1117
1118 irq_finalize_oneshot(desc, action);
1119 return ret;
1120 }
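
/*
 * Illustrative sketch, not part of this file: a thread_fn may sleep,
 * which is the whole point for devices behind slow buses. A hypothetical
 * I2C device handler could look like this:
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_dev *fd = dev_id;
 *		int status = i2c_smbus_read_byte_data(fd->client, FOO_STATUS);
 *
 *		if (status < 0)
 *			return IRQ_NONE;
 *		foo_handle_events(fd, status);
 *		return IRQ_HANDLED;
 *	}
 *
 * FOO_STATUS and the foo_* helpers are made up; the I2C transfer sleeps,
 * which would be illegal in a hard irq handler.
 */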
1121
1122 static void wake_threads_waitq(struct irq_desc *desc)
1123 {
1124 if (atomic_dec_and_test(&desc->threads_active))
1125 wake_up(&desc->wait_for_threads);
1126 }
1127
1128 static void irq_thread_dtor(struct callback_head *unused)
1129 {
1130 struct task_struct *tsk = current;
1131 struct irq_desc *desc;
1132 struct irqaction *action;
1133
1134 if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
1135 return;
1136
1137 action = kthread_data(tsk);
1138
1139 pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
1140 tsk->comm, tsk->pid, action->irq);
1141
1142
1143 desc = irq_to_desc(action->irq);
1144 /*
1145 * If IRQTF_RUNTHREAD is set, we need to decrement
1146 * desc->threads_active and wake possible waiters.
1147 */
1148 if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1149 wake_threads_waitq(desc);
1150
1151 /* Prevent a stale desc->threads_oneshot */
1152 irq_finalize_oneshot(desc, action);
1153 }
1154
1155 static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
1156 {
1157 struct irqaction *secondary = action->secondary;
1158
1159 if (WARN_ON_ONCE(!secondary))
1160 return;
1161
1162 raw_spin_lock_irq(&desc->lock);
1163 __irq_wake_thread(desc, secondary);
1164 raw_spin_unlock_irq(&desc->lock);
1165 }
1166
1167 /*
1168 * Internal function to notify that an interrupt thread is ready.
1169 */
1170 static void irq_thread_set_ready(struct irq_desc *desc,
1171 struct irqaction *action)
1172 {
1173 set_bit(IRQTF_READY, &action->thread_flags);
1174 wake_up(&desc->wait_for_threads);
1175 }
1176
1177 /*
1178 * Internal function to wake up an interrupt thread and wait until it is
1179 * ready.
1180 */
1181 static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc,
1182 struct irqaction *action)
1183 {
1184 if (!action || !action->thread)
1185 return;
1186
1187 wake_up_process(action->thread);
1188 wait_event(desc->wait_for_threads,
1189 test_bit(IRQTF_READY, &action->thread_flags));
1190 }
1191
1192 /*
1193 * Interrupt handler thread
1194 */
1195 static int irq_thread(void *data)
1196 {
1197 struct callback_head on_exit_work;
1198 struct irqaction *action = data;
1199 struct irq_desc *desc = irq_to_desc(action->irq);
1200 irqreturn_t (*handler_fn)(struct irq_desc *desc,
1201 struct irqaction *action);
1202
1203 irq_thread_set_ready(desc, action);
1204
1205 if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
1206 &action->thread_flags))
1207 handler_fn = irq_forced_thread_fn;
1208 else
1209 handler_fn = irq_thread_fn;
1210
1211 init_task_work(&on_exit_work, irq_thread_dtor);
1212 task_work_add(current, &on_exit_work, TWA_NONE);
1213
1214 irq_thread_check_affinity(desc, action);
1215
1216 while (!irq_wait_for_interrupt(action)) {
1217 irqreturn_t action_ret;
1218
1219 irq_thread_check_affinity(desc, action);
1220
1221 action_ret = handler_fn(desc, action);
1222 if (action_ret == IRQ_WAKE_THREAD)
1223 irq_wake_secondary(desc, action);
1224
1225 wake_threads_waitq(desc);
1226 }
1227
1228 /*
1229 * This is the regular exit path. __free_irq() is stopping the
1230 * thread via kthread_stop() after calling
1231 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
1232 * oneshot mask bit can be set.
1233 */
1234 task_work_cancel(current, irq_thread_dtor);
1235 return 0;
1236 }
1237
1238 /**
1239 * irq_wake_thread - wake the irq thread for the action identified by dev_id
1240 * @irq: Interrupt line
1241 * @dev_id: Device identity for which the thread should be woken
1242 *
1243 */
1244 void irq_wake_thread(unsigned int irq, void *dev_id)
1245 {
1246 struct irq_desc *desc = irq_to_desc(irq);
1247 struct irqaction *action;
1248 unsigned long flags;
1249
1250 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1251 return;
1252
1253 raw_spin_lock_irqsave(&desc->lock, flags);
1254 for_each_action_of_desc(desc, action) {
1255 if (action->dev_id == dev_id) {
1256 if (action->thread)
1257 __irq_wake_thread(desc, action);
1258 break;
1259 }
1260 }
1261 raw_spin_unlock_irqrestore(&desc->lock, flags);
1262 }
1263 EXPORT_SYMBOL_GPL(irq_wake_thread);
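
/*
 * Illustrative sketch, not part of this file: a driver can kick its own
 * irq thread from outside the primary handler, for instance from a
 * watchdog timer, instead of open-coding a separate work item. Names
 * are hypothetical.
 *
 *	static void foo_watchdog(struct timer_list *t)
 *	{
 *		struct foo_dev *fd = from_timer(fd, t, watchdog);
 *
 *		irq_wake_thread(fd->irq, fd);
 *	}
 */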
1264
1265 static int irq_setup_forced_threading(struct irqaction *new)
1266 {
1267 if (!force_irqthreads)
1268 return 0;
1269 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1270 return 0;
1271
1272 /*
1273 * No further action required for interrupts which are requested as
1274 * threaded interrupts already
1275 */
1276 if (new->handler == irq_default_primary_handler)
1277 return 0;
1278
1279 new->flags |= IRQF_ONESHOT;
1280
1281 /*
1282 * Handle the case where we have a real primary handler and a
1283 * thread handler. We force thread them as well by creating a
1284 * secondary action.
1285 */
1286 if (new->handler && new->thread_fn) {
1287 /* Allocate the secondary action */
1288 new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1289 if (!new->secondary)
1290 return -ENOMEM;
1291 new->secondary->handler = irq_forced_secondary_handler;
1292 new->secondary->thread_fn = new->thread_fn;
1293 new->secondary->dev_id = new->dev_id;
1294 new->secondary->irq = new->irq;
1295 new->secondary->name = new->name;
1296 }
1297 /* Deal with the primary handler */
1298 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1299 new->thread_fn = new->handler;
1300 new->handler = irq_default_primary_handler;
1301 return 0;
1302 }
1303
1304 static int irq_request_resources(struct irq_desc *desc)
1305 {
1306 struct irq_data *d = &desc->irq_data;
1307 struct irq_chip *c = d->chip;
1308
1309 return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1310 }
1311
1312 static void irq_release_resources(struct irq_desc *desc)
1313 {
1314 struct irq_data *d = &desc->irq_data;
1315 struct irq_chip *c = d->chip;
1316
1317 if (c->irq_release_resources)
1318 c->irq_release_resources(d);
1319 }
1320
1321 static bool irq_supports_nmi(struct irq_desc *desc)
1322 {
1323 struct irq_data *d = irq_desc_get_irq_data(desc);
1324
1325 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1326 /* Only IRQs directly managed by the root irqchip can be set as NMI */
1327 if (d->parent_data)
1328 return false;
1329 #endif
1330 /* Don't support NMIs for chips behind a slow bus */
1331 if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
1332 return false;
1333
1334 return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
1335 }
1336
1337 static int irq_nmi_setup(struct irq_desc *desc)
1338 {
1339 struct irq_data *d = irq_desc_get_irq_data(desc);
1340 struct irq_chip *c = d->chip;
1341
1342 return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
1343 }
1344
1345 static void irq_nmi_teardown(struct irq_desc *desc)
1346 {
1347 struct irq_data *d = irq_desc_get_irq_data(desc);
1348 struct irq_chip *c = d->chip;
1349
1350 if (c->irq_nmi_teardown)
1351 c->irq_nmi_teardown(d);
1352 }
1353
1354 static int
1355 setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1356 {
1357 struct task_struct *t;
1358
1359 if (!secondary) {
1360 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1361 new->name);
1362 } else {
1363 t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1364 new->name);
1365 }
1366
1367 if (IS_ERR(t))
1368 return PTR_ERR(t);
1369
1370 sched_set_fifo(t);
1371
1372 /*
1373 * We keep the reference to the task struct even if
1374 * the thread dies to avoid that the interrupt code
1375 * references an already freed task_struct.
1376 */
1377 new->thread = get_task_struct(t);
1378 /*
1379 * Tell the thread to set its affinity. This is
1380 * important for shared interrupt handlers as we do
1381 * not invoke setup_affinity() for the secondary
1382 * handlers as everything is already set up. Even for
1383 * interrupts marked with IRQF_NO_BALANCE this is
1384 * correct as we want the thread to move to the cpu(s)
1385 * on which the requesting code placed the interrupt.
1386 */
1387 set_bit(IRQTF_AFFINITY, &new->thread_flags);
1388 return 0;
1389 }
1390
1391 /*
1392 * Internal function to register an irqaction - typically used to
1393 * allocate special interrupts that are part of the architecture.
1394 *
1395 * Locking rules:
1396 *
1397 * desc->request_mutex Provides serialization against a concurrent free_irq()
1398 * chip_bus_lock Provides serialization for slow bus operations
1399 * desc->lock Provides serialization against hard interrupts
1400 *
1401 * chip_bus_lock and desc->lock are sufficient for all other management and
1402 * interrupt related functions. desc->request_mutex solely serializes
1403 * request/free_irq().
1404 */
1405 static int
1406 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1407 {
1408 struct irqaction *old, **old_ptr;
1409 unsigned long flags, thread_mask = 0;
1410 int ret, nested, shared = 0;
1411
1412 if (!desc)
1413 return -EINVAL;
1414
1415 if (desc->irq_data.chip == &no_irq_chip)
1416 return -ENOSYS;
1417 if (!try_module_get(desc->owner))
1418 return -ENODEV;
1419
1420 new->irq = irq;
1421
1422 /*
1423 * If the trigger type is not specified by the caller,
1424 * then use the default for this interrupt.
1425 */
1426 if (!(new->flags & IRQF_TRIGGER_MASK))
1427 new->flags |= irqd_get_trigger_type(&desc->irq_data);
1428
1429 /*
1430 * Check whether the interrupt nests into another interrupt
1431 * thread.
1432 */
1433 nested = irq_settings_is_nested_thread(desc);
1434 if (nested) {
1435 if (!new->thread_fn) {
1436 ret = -EINVAL;
1437 goto out_mput;
1438 }
1439 /*
1440 * Replace the primary handler which was provided from
1441 * the driver for non nested interrupt handling by the
1442 * dummy function which warns when called.
1443 */
1444 new->handler = irq_nested_primary_handler;
1445 } else {
1446 if (irq_settings_can_thread(desc)) {
1447 ret = irq_setup_forced_threading(new);
1448 if (ret)
1449 goto out_mput;
1450 }
1451 }
1452
1453 /*
1454 * Create a handler thread when a thread function is supplied
1455 * and the interrupt does not nest into another interrupt
1456 * thread.
1457 */
1458 if (new->thread_fn && !nested) {
1459 ret = setup_irq_thread(new, irq, false);
1460 if (ret)
1461 goto out_mput;
1462 if (new->secondary) {
1463 ret = setup_irq_thread(new->secondary, irq, true);
1464 if (ret)
1465 goto out_thread;
1466 }
1467 }
1468
1469 /*
1470 * Drivers are often written to work w/o knowledge about the
1471 * underlying irq chip implementation, so a request for a
1472 * threaded irq without a primary hard irq context handler
1473 * requires the ONESHOT flag to be set. Some irq chips like
1474 * MSI based interrupts are per se one shot safe. Check the
1475 * chip flags, so we can avoid the unmask dance at the end of
1476 * the threaded handler for those.
1477 */
1478 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1479 new->flags &= ~IRQF_ONESHOT;
1480
1481 /*
1482 * Protects against a concurrent __free_irq() call which might wait
1483 * for synchronize_hardirq() to complete without holding the optional
1484 * chip bus lock and desc->lock. Also protects against handing out
1485 * a recycled oneshot thread_mask bit while it's still in use by
1486 * its previous owner.
1487 */
1488 mutex_lock(&desc->request_mutex);
1489
1490 /*
1491 * Acquire bus lock as the irq_request_resources() callback below
1492 * might rely on the serialization or the magic power management
1493 * functions which are abusing the irq_bus_lock() callback.
1494 */
1495 chip_bus_lock(desc);
1496
1497 /* First installed action requests resources. */
1498 if (!desc->action) {
1499 ret = irq_request_resources(desc);
1500 if (ret) {
1501 pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1502 new->name, irq, desc->irq_data.chip->name);
1503 goto out_bus_unlock;
1504 }
1505 }
1506
1507 /*
1508 * The following block of code has to be executed atomically
1509 * protected against a concurrent interrupt and any of the other
1510 * management calls which are not serialized via
1511 * desc->request_mutex or the optional bus lock.
1512 */
1513 raw_spin_lock_irqsave(&desc->lock, flags);
1514 old_ptr = &desc->action;
1515 old = *old_ptr;
1516 if (old) {
1517 /*
1518 * Can't share interrupts unless both agree to and are
1519 * the same type (level, edge, polarity). So both flag
1520 * fields must have IRQF_SHARED set and the bits which
1521 * set the trigger type must match. Also all must
1522 * agree on ONESHOT.
1523 * Interrupt lines used for NMIs cannot be shared.
1524 */
1525 unsigned int oldtype;
1526
1527 if (desc->istate & IRQS_NMI) {
1528 pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
1529 new->name, irq, desc->irq_data.chip->name);
1530 ret = -EINVAL;
1531 goto out_unlock;
1532 }
1533
1534 /*
1535 * If nobody did set the configuration before, inherit
1536 * the one provided by the requester.
1537 */
1538 if (irqd_trigger_type_was_set(&desc->irq_data)) {
1539 oldtype = irqd_get_trigger_type(&desc->irq_data);
1540 } else {
1541 oldtype = new->flags & IRQF_TRIGGER_MASK;
1542 irqd_set_trigger_type(&desc->irq_data, oldtype);
1543 }
1544
1545 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1546 (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1547 ((old->flags ^ new->flags) & IRQF_ONESHOT))
1548 goto mismatch;
1549
1550 /* All handlers must agree on per-cpuness */
1551 if ((old->flags & IRQF_PERCPU) !=
1552 (new->flags & IRQF_PERCPU))
1553 goto mismatch;
1554
1555 /* add new interrupt at end of irq queue */
1556 do {
1557 /*
1558 * Or all existing action->thread_mask bits,
1559 * so we can find the next zero bit for this
1560 * new action.
1561 */
1562 thread_mask |= old->thread_mask;
1563 old_ptr = &old->next;
1564 old = *old_ptr;
1565 } while (old);
1566 shared = 1;
1567 }
1568
1569 /*
1570 * Setup the thread mask for this irqaction for ONESHOT. For
1571 * !ONESHOT irqs the thread mask is 0 so we can avoid a
1572 * conditional in irq_wake_thread().
1573 */
1574 if (new->flags & IRQF_ONESHOT) {
1575 /*
1576 * Unlikely to have 32 or 64 irqs sharing one line,
1577 * but who knows.
1578 */
1579 if (thread_mask == ~0UL) {
1580 ret = -EBUSY;
1581 goto out_unlock;
1582 }
1583 /*
1584 * The thread_mask for the action is or'ed to
1585 * desc->threads_oneshot to indicate that the
1586 * IRQF_ONESHOT thread handler has been woken, but not
1587 * yet finished. The bit is cleared when a thread
1588 * completes. When all threads of a shared interrupt
1589 * line have completed desc->threads_active becomes
1590 * zero and the interrupt line is unmasked. See
1591 * handle.c:irq_wake_thread() for further information.
1592 *
1593 * If no thread is woken by primary (hard irq context)
1594 * interrupt handlers, then desc->threads_active is
1595 * also checked for zero to unmask the irq line in the
1596 * affected hard irq flow handlers
1597 * (handle_[fasteoi|level]_irq).
1598 *
1599 * The new action gets the first zero bit of
1600 * thread_mask assigned. See the loop above which or's
1601 * all existing action->thread_mask bits.
1602 */
1603 new->thread_mask = 1UL << ffz(thread_mask);
1604
1605 } else if (new->handler == irq_default_primary_handler &&
1606 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1607 /*
1608 * The interrupt was requested with handler = NULL, so
1609 * we use the default primary handler for it. But it
1610 * does not have the oneshot flag set. In combination
1611 * with level interrupts this is deadly, because the
1612 * default primary handler just wakes the thread, then
1613 * the irq line is reenabled, but the device still
1614 * has the level irq asserted. Rinse and repeat....
1615 *
1616 * While this works for edge type interrupts, we play
1617 * it safe and reject unconditionally because we can't
1618 * say for sure which type this interrupt really
1619 * has. The type flags are unreliable as the
1620 * underlying chip implementation can override them.
1621 */
1622 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
1623 new->name, irq);
1624 ret = -EINVAL;
1625 goto out_unlock;
1626 }
1627
1628 if (!shared) {
1629 /* Setup the type (level, edge polarity) if configured: */
1630 if (new->flags & IRQF_TRIGGER_MASK) {
1631 ret = __irq_set_trigger(desc,
1632 new->flags & IRQF_TRIGGER_MASK);
1633
1634 if (ret)
1635 goto out_unlock;
1636 }
1637
1638 /*
1639 * Activate the interrupt. That activation must happen
1640 * independently of IRQ_NOAUTOEN. request_irq() can fail
1641 * and the callers are supposed to handle
1642 * that. enable_irq() of an interrupt requested with
1643 * IRQ_NOAUTOEN is not supposed to fail. The activation
1644 * keeps it in shutdown mode, it merely associates
1645 * resources if necessary and if that's not possible it
1646 * fails. Interrupts which are in managed shutdown mode
1647 * will simply ignore that activation request.
1648 */
1649 ret = irq_activate(desc);
1650 if (ret)
1651 goto out_unlock;
1652
1653 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1654 IRQS_ONESHOT | IRQS_WAITING);
1655 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1656
1657 if (new->flags & IRQF_PERCPU) {
1658 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1659 irq_settings_set_per_cpu(desc);
1660 }
1661
1662 if (new->flags & IRQF_ONESHOT)
1663 desc->istate |= IRQS_ONESHOT;
1664
1665 /* Exclude IRQ from balancing if requested */
1666 if (new->flags & IRQF_NOBALANCING) {
1667 irq_settings_set_no_balancing(desc);
1668 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1669 }
1670
1671 if (!(new->flags & IRQF_NO_AUTOEN) &&
1672 irq_settings_can_autoenable(desc)) {
1673 irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1674 } else {
1675 /*
1676 * Shared interrupts do not go well with disabling
1677 * auto enable. The sharing interrupt might request
1678 * it while it's still disabled and then wait for
1679 * interrupts forever.
1680 */
1681 WARN_ON_ONCE(new->flags & IRQF_SHARED);
1682 /* Undo nested disables: */
1683 desc->depth = 1;
1684 }
1685
1686 } else if (new->flags & IRQF_TRIGGER_MASK) {
1687 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1688 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1689
1690 if (nmsk != omsk)
1691 /* hope the handler works with current trigger mode */
1692 pr_warn("irq %d uses trigger mode %u; requested %u\n",
1693 irq, omsk, nmsk);
1694 }
1695
1696 *old_ptr = new;
1697
1698 irq_pm_install_action(desc, new);
1699
1700 /* Reset broken irq detection when installing new handler */
1701 desc->irq_count = 0;
1702 desc->irqs_unhandled = 0;
1703
1704 /*
1705 * Check whether we disabled the irq via the spurious handler
1706 * before. Reenable it and give it another chance.
1707 */
1708 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1709 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1710 __enable_irq(desc);
1711 }
1712
1713 raw_spin_unlock_irqrestore(&desc->lock, flags);
1714 chip_bus_sync_unlock(desc);
1715 mutex_unlock(&desc->request_mutex);
1716
1717 irq_setup_timings(desc, new);
1718
1719 wake_up_and_wait_for_irq_thread_ready(desc, new);
1720 wake_up_and_wait_for_irq_thread_ready(desc, new->secondary);
1721
1722 register_irq_proc(irq, desc);
1723 new->dir = NULL;
1724 register_handler_proc(irq, new);
1725 return 0;
1726
1727 mismatch:
1728 if (!(new->flags & IRQF_PROBE_SHARED)) {
1729 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1730 irq, new->flags, new->name, old->flags, old->name);
1731 #ifdef CONFIG_DEBUG_SHIRQ
1732 dump_stack();
1733 #endif
1734 }
1735 ret = -EBUSY;
1736
1737 out_unlock:
1738 raw_spin_unlock_irqrestore(&desc->lock, flags);
1739
1740 if (!desc->action)
1741 irq_release_resources(desc);
1742 out_bus_unlock:
1743 chip_bus_sync_unlock(desc);
1744 mutex_unlock(&desc->request_mutex);
1745
1746 out_thread:
1747 if (new->thread) {
1748 struct task_struct *t = new->thread;
1749
1750 new->thread = NULL;
1751 kthread_stop(t);
1752 put_task_struct(t);
1753 }
1754 if (new->secondary && new->secondary->thread) {
1755 struct task_struct *t = new->secondary->thread;
1756
1757 new->secondary->thread = NULL;
1758 kthread_stop(t);
1759 put_task_struct(t);
1760 }
1761 out_mput:
1762 module_put(desc->owner);
1763 return ret;
1764 }
1765
1766 /*
1767 * Internal function to unregister an irqaction - used to free
1768 * regular and special interrupts that are part of the architecture.
1769 */
1770 static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
1771 {
1772 unsigned irq = desc->irq_data.irq;
1773 struct irqaction *action, **action_ptr;
1774 unsigned long flags;
1775
1776 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1777
1778 mutex_lock(&desc->request_mutex);
1779 chip_bus_lock(desc);
1780 raw_spin_lock_irqsave(&desc->lock, flags);
1781
1782 /*
1783 * There can be multiple actions per IRQ descriptor, find the right
1784 * one based on the dev_id:
1785 */
1786 action_ptr = &desc->action;
1787 for (;;) {
1788 action = *action_ptr;
1789
1790 if (!action) {
1791 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1792 raw_spin_unlock_irqrestore(&desc->lock, flags);
1793 chip_bus_sync_unlock(desc);
1794 mutex_unlock(&desc->request_mutex);
1795 return NULL;
1796 }
1797
1798 if (action->dev_id == dev_id)
1799 break;
1800 action_ptr = &action->next;
1801 }
1802
1803 /* Found it - now remove it from the list of entries: */
1804 *action_ptr = action->next;
1805
1806 irq_pm_remove_action(desc, action);
1807
1808 /* If this was the last handler, shut down the IRQ line: */
1809 if (!desc->action) {
1810 irq_settings_clr_disable_unlazy(desc);
1811 /* Only shutdown. Deactivate after synchronize_hardirq() */
1812 irq_shutdown(desc);
1813 }
1814
1815 #ifdef CONFIG_SMP
1816 /* make sure affinity_hint is cleaned up */
1817 if (WARN_ON_ONCE(desc->affinity_hint))
1818 desc->affinity_hint = NULL;
1819 #endif
1820
1821 raw_spin_unlock_irqrestore(&desc->lock, flags);
1822 /*
1823 * Drop bus_lock here so the changes which were done in the chip
1824 * callbacks above are synced out to the irq chips which hang
1825 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
1826 *
1827 * Aside of that the bus_lock can also be taken from the threaded
1828 * handler in irq_finalize_oneshot() which results in a deadlock
1829 * because kthread_stop() would wait forever for the thread to
1830 * complete, which is blocked on the bus lock.
1831 *
1832 * The still held desc->request_mutex protects against a
1833 * concurrent request_irq() of this irq so the release of resources
1834 * and timing data is properly serialized.
1835 */
1836 chip_bus_sync_unlock(desc);
1837
1838 unregister_handler_proc(irq, action);
1839
1840 /*
1841 * Make sure it's not being used on another CPU and if the chip
1842 * supports it also make sure that there is no (not yet serviced)
1843 * interrupt in flight at the hardware level.
1844 */
1845 __synchronize_hardirq(desc, true);
1846
1847 #ifdef CONFIG_DEBUG_SHIRQ
1848 /*
1849 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1850 * event to happen even now that it's being freed, so let's make sure that
1851 * is so by doing an extra call to the handler ....
1852 *
1853 * ( We do this after actually deregistering it, to make sure that a
1854 * 'real' IRQ doesn't run in parallel with our fake. )
1855 */
1856 if (action->flags & IRQF_SHARED) {
1857 local_irq_save(flags);
1858 action->handler(irq, dev_id);
1859 local_irq_restore(flags);
1860 }
1861 #endif
1862
1863 /*
1864 * The action has already been removed above, but the thread writes
1865 * its oneshot mask bit when it completes. However, request_mutex is
1866 * held across this, which prevents __setup_irq() from handing out
1867 * the same bit to a newly requested action.
1868 */
1869 if (action->thread) {
1870 kthread_stop(action->thread);
1871 put_task_struct(action->thread);
1872 if (action->secondary && action->secondary->thread) {
1873 kthread_stop(action->secondary->thread);
1874 put_task_struct(action->secondary->thread);
1875 }
1876 }
1877
1878 /* Last action releases resources */
1879 if (!desc->action) {
1880 /*
1881 * Reacquire bus lock as irq_release_resources() might
1882 * require it to deallocate resources over the slow bus.
1883 */
1884 chip_bus_lock(desc);
1885 /*
1886 * There is no interrupt on the fly anymore. Deactivate it
1887 * completely.
1888 */
1889 raw_spin_lock_irqsave(&desc->lock, flags);
1890 irq_domain_deactivate_irq(&desc->irq_data);
1891 raw_spin_unlock_irqrestore(&desc->lock, flags);
1892
1893 irq_release_resources(desc);
1894 chip_bus_sync_unlock(desc);
1895 irq_remove_timings(desc);
1896 }
1897
1898 mutex_unlock(&desc->request_mutex);
1899
1900 irq_chip_pm_put(&desc->irq_data);
1901 module_put(desc->owner);
1902 kfree(action->secondary);
1903 return action;
1904 }
1905
1906 /**
1907 * free_irq - free an interrupt allocated with request_irq
1908 * @irq: Interrupt line to free
1909 * @dev_id: Device identity to free
1910 *
1911 * Remove an interrupt handler. The handler is removed and if the
1912 * interrupt line is no longer in use by any driver it is disabled.
1913 * On a shared IRQ the caller must ensure the interrupt is disabled
1914 * on the card it drives before calling this function. The function
1915 * does not return until any executing interrupts for this IRQ
1916 * have completed.
1917 *
1918 * This function must not be called from interrupt context.
1919 *
1920 * Returns the devname argument passed to request_irq.
1921 */
1922 const void *free_irq(unsigned int irq, void *dev_id)
1923 {
1924 struct irq_desc *desc = irq_to_desc(irq);
1925 struct irqaction *action;
1926 const char *devname;
1927
1928 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1929 return NULL;
1930
1931 #ifdef CONFIG_SMP
1932 if (WARN_ON(desc->affinity_notify))
1933 desc->affinity_notify = NULL;
1934 #endif
1935
1936 action = __free_irq(desc, dev_id);
1937
1938 if (!action)
1939 return NULL;
1940
1941 devname = action->name;
1942 kfree(action);
1943 return devname;
1944 }
1945 EXPORT_SYMBOL(free_irq);
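/*
 * Illustrative sketch (not part of this file): a driver normally pairs
 * request_irq() with free_irq() using the same dev_id cookie, which is
 * what identifies the action to remove on a shared line. struct foo_dev
 * and the foo_*() helpers are hypothetical:
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		if (!foo_irq_raised(foo))
 *			return IRQ_NONE;	// not ours (shared line)
 *		foo_ack_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(irq, foo_interrupt, IRQF_SHARED, "foo", foo);
 *	...
 *	free_irq(irq, foo);			// never from interrupt context
 */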
1946
1947 /* This function must be called with desc->lock held */
1948 static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
1949 {
1950 const char *devname = NULL;
1951
1952 desc->istate &= ~IRQS_NMI;
1953
1954 if (!WARN_ON(desc->action == NULL)) {
1955 irq_pm_remove_action(desc, desc->action);
1956 devname = desc->action->name;
1957 unregister_handler_proc(irq, desc->action);
1958
1959 kfree(desc->action);
1960 desc->action = NULL;
1961 }
1962
1963 irq_settings_clr_disable_unlazy(desc);
1964 irq_shutdown_and_deactivate(desc);
1965
1966 irq_release_resources(desc);
1967
1968 irq_chip_pm_put(&desc->irq_data);
1969 module_put(desc->owner);
1970
1971 return devname;
1972 }
1973
1974 const void *free_nmi(unsigned int irq, void *dev_id)
1975 {
1976 struct irq_desc *desc = irq_to_desc(irq);
1977 unsigned long flags;
1978 const void *devname;
1979
1980 if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
1981 return NULL;
1982
1983 if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1984 return NULL;
1985
1986 /* NMI still enabled */
1987 if (WARN_ON(desc->depth == 0))
1988 disable_nmi_nosync(irq);
1989
1990 raw_spin_lock_irqsave(&desc->lock, flags);
1991
1992 irq_nmi_teardown(desc);
1993 devname = __cleanup_nmi(irq, desc);
1994
1995 raw_spin_unlock_irqrestore(&desc->lock, flags);
1996
1997 return devname;
1998 }
1999
2000 /**
2001 * request_threaded_irq - allocate an interrupt line
2002 * @irq: Interrupt line to allocate
2003 * @handler: Function to be called when the IRQ occurs.
2004 * Primary handler for threaded interrupts
2005 * If NULL and thread_fn != NULL the default
2006 * primary handler is installed
2007 * @thread_fn: Function called from the irq handler thread
2008 * If NULL, no irq thread is created
2009 * @irqflags: Interrupt type flags
2010 * @devname: An ascii name for the claiming device
2011 * @dev_id: A cookie passed back to the handler function
2012 *
2013 * This call allocates interrupt resources and enables the
2014 * interrupt line and IRQ handling. From the point this
2015 * call is made your handler function may be invoked. Since
2016 * your handler function must clear any interrupt the board
2017 * raises, you must take care both to initialise your hardware
2018 * and to set up the interrupt handler in the right order.
2019 *
2020 * If you want to set up a threaded irq handler for your device
2021 * then you need to supply @handler and @thread_fn. @handler is
2022 * still called in hard interrupt context and has to check
2023 * whether the interrupt originates from the device. If yes it
2024 * needs to disable the interrupt on the device and return
2025 * IRQ_WAKE_THREAD which will wake up the handler thread and run
2026 * @thread_fn. This split handler design is necessary to support
2027 * shared interrupts.
2028 *
2029 * Dev_id must be globally unique. Normally the address of the
2030 * device data structure is used as the cookie. Since the handler
2031 * receives this value it makes sense to use it.
2032 *
2033 * If your interrupt is shared you must pass a non NULL dev_id
2034 * as this is required when freeing the interrupt.
2035 *
2036 * Flags:
2037 *
2038 * IRQF_SHARED Interrupt is shared
2039 * IRQF_TRIGGER_* Specify active edge(s) or level
2040 *
2041 */
2042 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
2043 irq_handler_t thread_fn, unsigned long irqflags,
2044 const char *devname, void *dev_id)
2045 {
2046 struct irqaction *action;
2047 struct irq_desc *desc;
2048 int retval;
2049
2050 if (irq == IRQ_NOTCONNECTED)
2051 return -ENOTCONN;
2052
2053 /*
2054 * Sanity-check: shared interrupts must pass in a real dev-ID,
2055 * otherwise we'll have trouble later trying to figure out
2056 * which interrupt is which (messes up the interrupt freeing
2057 * logic etc).
2058 *
2059 * Also shared interrupts do not go well with disabling auto enable.
2060 * Another driver sharing the interrupt might request it while it's
2061 * still disabled and then wait for interrupts forever.
2062 *
2063 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
2064 * it cannot be set along with IRQF_NO_SUSPEND.
2065 */
2066 if (((irqflags & IRQF_SHARED) && !dev_id) ||
2067 ((irqflags & IRQF_SHARED) && (irqflags & IRQF_NO_AUTOEN)) ||
2068 (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
2069 ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
2070 return -EINVAL;
2071
2072 desc = irq_to_desc(irq);
2073 if (!desc)
2074 return -EINVAL;
2075
2076 if (!irq_settings_can_request(desc) ||
2077 WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2078 return -EINVAL;
2079
2080 if (!handler) {
2081 if (!thread_fn)
2082 return -EINVAL;
2083 handler = irq_default_primary_handler;
2084 }
2085
2086 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2087 if (!action)
2088 return -ENOMEM;
2089
2090 action->handler = handler;
2091 action->thread_fn = thread_fn;
2092 action->flags = irqflags;
2093 action->name = devname;
2094 action->dev_id = dev_id;
2095
2096 retval = irq_chip_pm_get(&desc->irq_data);
2097 if (retval < 0) {
2098 kfree(action);
2099 return retval;
2100 }
2101
2102 retval = __setup_irq(irq, desc, action);
2103
2104 if (retval) {
2105 irq_chip_pm_put(&desc->irq_data);
2106 kfree(action->secondary);
2107 kfree(action);
2108 }
2109
2110 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
2111 if (!retval && (irqflags & IRQF_SHARED)) {
2112 /*
2113 * It's a shared IRQ -- the driver ought to be prepared for it
2114 * to happen immediately, so let's make sure....
2115 * We disable the irq to make sure that a 'real' IRQ doesn't
2116 * run in parallel with our fake.
2117 */
2118 unsigned long flags;
2119
2120 disable_irq(irq);
2121 local_irq_save(flags);
2122
2123 handler(irq, dev_id);
2124
2125 local_irq_restore(flags);
2126 enable_irq(irq);
2127 }
2128 #endif
2129 return retval;
2130 }
2131 EXPORT_SYMBOL(request_threaded_irq);
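/*
 * Illustrative sketch (not part of this file): the split primary/threaded
 * handler described above could look like this in a driver. struct foo_dev
 * and the foo_*() helpers are hypothetical:
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))
 *			return IRQ_NONE;	// not our device (shared line)
 *		foo_mask_device_irq(foo);	// quiesce the device
 *		return IRQ_WAKE_THREAD;		// run foo_thread_fn() in a thread
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		foo_process_events(foo);	// may sleep, e.g. slow bus access
 *		foo_unmask_device_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(irq, foo_hardirq, foo_thread_fn,
 *				   IRQF_SHARED, "foo", foo);
 */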
2132
2133 /**
2134 * request_any_context_irq - allocate an interrupt line
2135 * @irq: Interrupt line to allocate
2136 * @handler: Function to be called when the IRQ occurs.
2137 * Threaded handler for threaded interrupts.
2138 * @flags: Interrupt type flags
2139 * @name: An ascii name for the claiming device
2140 * @dev_id: A cookie passed back to the handler function
2141 *
2142 * This call allocates interrupt resources and enables the
2143 * interrupt line and IRQ handling. It selects either a
2144 * hardirq or threaded handling method depending on the
2145 * context.
2146 *
2147 * On failure, it returns a negative value. On success,
2148 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
2149 */
2150 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
2151 unsigned long flags, const char *name, void *dev_id)
2152 {
2153 struct irq_desc *desc;
2154 int ret;
2155
2156 if (irq == IRQ_NOTCONNECTED)
2157 return -ENOTCONN;
2158
2159 desc = irq_to_desc(irq);
2160 if (!desc)
2161 return -EINVAL;
2162
2163 if (irq_settings_is_nested_thread(desc)) {
2164 ret = request_threaded_irq(irq, NULL, handler,
2165 flags, name, dev_id);
2166 return !ret ? IRQC_IS_NESTED : ret;
2167 }
2168
2169 ret = request_irq(irq, handler, flags, name, dev_id);
2170 return !ret ? IRQC_IS_HARDIRQ : ret;
2171 }
2172 EXPORT_SYMBOL_GPL(request_any_context_irq);
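/*
 * Illustrative sketch (not part of this file): a caller can use the
 * positive return value of request_any_context_irq() to learn in which
 * context its handler will run; foo_handler, foo and dev are hypothetical:
 *
 *	ret = request_any_context_irq(irq, foo_handler, 0, "foo", foo);
 *	if (ret < 0)
 *		return ret;
 *	if (ret == IRQC_IS_NESTED)
 *		dev_dbg(dev, "foo handler runs in a thread\n");
 *	else				// IRQC_IS_HARDIRQ
 *		dev_dbg(dev, "foo handler runs in hard IRQ context\n");
 */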
2173
2174 /**
2175 * request_nmi - allocate an interrupt line for NMI delivery
2176 * @irq: Interrupt line to allocate
2177 * @handler: Function to be called when the IRQ occurs.
2178 * Threaded handler for threaded interrupts.
2179 * @irqflags: Interrupt type flags
2180 * @name: An ascii name for the claiming device
2181 * @dev_id: A cookie passed back to the handler function
2182 *
2183 * This call allocates interrupt resources and enables the
2184 * interrupt line and IRQ handling. It sets up the IRQ line
2185 * to be handled as an NMI.
2186 *
2187 * An interrupt line delivering NMIs cannot be shared and IRQ handling
2188 * cannot be threaded.
2189 *
2190 * Interrupt lines requested for NMI delivery must produce per-CPU
2191 * interrupts and have their auto-enable setting disabled.
2192 *
2193 * Dev_id must be globally unique. Normally the address of the
2194 * device data structure is used as the cookie. Since the handler
2195 * receives this value it makes sense to use it.
2196 *
2197 * If the interrupt line cannot be used to deliver NMIs, the function
2198 * will fail and return a negative value.
2199 */
2200 int request_nmi(unsigned int irq, irq_handler_t handler,
2201 unsigned long irqflags, const char *name, void *dev_id)
2202 {
2203 struct irqaction *action;
2204 struct irq_desc *desc;
2205 unsigned long flags;
2206 int retval;
2207
2208 if (irq == IRQ_NOTCONNECTED)
2209 return -ENOTCONN;
2210
2211 /* NMI cannot be shared or used for polling */
2212 if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
2213 return -EINVAL;
2214
2215 if (!(irqflags & IRQF_PERCPU))
2216 return -EINVAL;
2217
2218 if (!handler)
2219 return -EINVAL;
2220
2221 desc = irq_to_desc(irq);
2222
2223 if (!desc || (irq_settings_can_autoenable(desc) &&
2224 !(irqflags & IRQF_NO_AUTOEN)) ||
2225 !irq_settings_can_request(desc) ||
2226 WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
2227 !irq_supports_nmi(desc))
2228 return -EINVAL;
2229
2230 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2231 if (!action)
2232 return -ENOMEM;
2233
2234 action->handler = handler;
2235 action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
2236 action->name = name;
2237 action->dev_id = dev_id;
2238
2239 retval = irq_chip_pm_get(&desc->irq_data);
2240 if (retval < 0)
2241 goto err_out;
2242
2243 retval = __setup_irq(irq, desc, action);
2244 if (retval)
2245 goto err_irq_setup;
2246
2247 raw_spin_lock_irqsave(&desc->lock, flags);
2248
2249 /* Setup NMI state */
2250 desc->istate |= IRQS_NMI;
2251 retval = irq_nmi_setup(desc);
2252 if (retval) {
2253 __cleanup_nmi(irq, desc);
2254 raw_spin_unlock_irqrestore(&desc->lock, flags);
2255 return -EINVAL;
2256 }
2257
2258 raw_spin_unlock_irqrestore(&desc->lock, flags);
2259
2260 return 0;
2261
2262 err_irq_setup:
2263 irq_chip_pm_put(&desc->irq_data);
2264 err_out:
2265 kfree(action);
2266
2267 return retval;
2268 }
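/*
 * Illustrative sketch (not part of this file): per the constraints above a
 * request_nmi() caller passes IRQF_PERCPU, keeps the line from being
 * auto-enabled and enables it explicitly afterwards. foo_nmi_handler and
 * foo are hypothetical, and whether the line supports NMI delivery at all
 * depends on the irqchip:
 *
 *	ret = request_nmi(irq, foo_nmi_handler,
 *			  IRQF_PERCPU | IRQF_NO_AUTOEN, "foo-nmi", foo);
 *	if (!ret)
 *		enable_nmi(irq);
 */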
2269
2270 void enable_percpu_irq(unsigned int irq, unsigned int type)
2271 {
2272 unsigned int cpu = smp_processor_id();
2273 unsigned long flags;
2274 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2275
2276 if (!desc)
2277 return;
2278
2279 /*
2280 * If the trigger type is not specified by the caller, then
2281 * use the default for this interrupt.
2282 */
2283 type &= IRQ_TYPE_SENSE_MASK;
2284 if (type == IRQ_TYPE_NONE)
2285 type = irqd_get_trigger_type(&desc->irq_data);
2286
2287 if (type != IRQ_TYPE_NONE) {
2288 int ret;
2289
2290 ret = __irq_set_trigger(desc, type);
2291
2292 if (ret) {
2293 WARN(1, "failed to set type for IRQ%d\n", irq);
2294 goto out;
2295 }
2296 }
2297
2298 irq_percpu_enable(desc, cpu);
2299 out:
2300 irq_put_desc_unlock(desc, flags);
2301 }
2302 EXPORT_SYMBOL_GPL(enable_percpu_irq);
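/*
 * Illustrative sketch (not part of this file): enable_percpu_irq() only
 * acts on the calling CPU, so enabling a per-CPU interrupt everywhere is
 * typically done via on_each_cpu() or a CPU hotplug callback; foo_irq is
 * hypothetical:
 *
 *	static void foo_enable_on_cpu(void *info)
 *	{
 *		enable_percpu_irq(foo_irq, IRQ_TYPE_NONE);
 *	}
 *
 *	on_each_cpu(foo_enable_on_cpu, NULL, 1);
 */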
2303
2304 void enable_percpu_nmi(unsigned int irq, unsigned int type)
2305 {
2306 enable_percpu_irq(irq, type);
2307 }
2308
2309 /**
2310 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
2311 * @irq: Linux irq number to check for
2312 *
2313 * Must be called from a non-migratable context. Returns the enable
2314 * state of a per-CPU interrupt on the current CPU.
2315 */
2316 bool irq_percpu_is_enabled(unsigned int irq)
2317 {
2318 unsigned int cpu = smp_processor_id();
2319 struct irq_desc *desc;
2320 unsigned long flags;
2321 bool is_enabled;
2322
2323 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2324 if (!desc)
2325 return false;
2326
2327 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
2328 irq_put_desc_unlock(desc, flags);
2329
2330 return is_enabled;
2331 }
2332 EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
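/*
 * Illustrative sketch (not part of this file): since the check applies to
 * the current CPU only, callers typically pin themselves first:
 *
 *	cpu = get_cpu();
 *	enabled = irq_percpu_is_enabled(irq);
 *	put_cpu();
 */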
2333
2334 void disable_percpu_irq(unsigned int irq)
2335 {
2336 unsigned int cpu = smp_processor_id();
2337 unsigned long flags;
2338 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2339
2340 if (!desc)
2341 return;
2342
2343 irq_percpu_disable(desc, cpu);
2344 irq_put_desc_unlock(desc, flags);
2345 }
2346 EXPORT_SYMBOL_GPL(disable_percpu_irq);
2347
2348 void disable_percpu_nmi(unsigned int irq)
2349 {
2350 disable_percpu_irq(irq);
2351 }
2352
2353 /*
2354 * Internal function to unregister a percpu irqaction.
2355 */
2356 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2357 {
2358 struct irq_desc *desc = irq_to_desc(irq);
2359 struct irqaction *action;
2360 unsigned long flags;
2361
2362 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
2363
2364 if (!desc)
2365 return NULL;
2366
2367 raw_spin_lock_irqsave(&desc->lock, flags);
2368
2369 action = desc->action;
2370 if (!action || action->percpu_dev_id != dev_id) {
2371 WARN(1, "Trying to free already-free IRQ %d\n", irq);
2372 goto bad;
2373 }
2374
2375 if (!cpumask_empty(desc->percpu_enabled)) {
2376 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
2377 irq, cpumask_first(desc->percpu_enabled));
2378 goto bad;
2379 }
2380
2381 /* Found it - now remove it from the list of entries: */
2382 desc->action = NULL;
2383
2384 desc->istate &= ~IRQS_NMI;
2385
2386 raw_spin_unlock_irqrestore(&desc->lock, flags);
2387
2388 unregister_handler_proc(irq, action);
2389
2390 irq_chip_pm_put(&desc->irq_data);
2391 module_put(desc->owner);
2392 return action;
2393
2394 bad:
2395 raw_spin_unlock_irqrestore(&desc->lock, flags);
2396 return NULL;
2397 }
2398
2399 /**
2400 * remove_percpu_irq - free a per-cpu interrupt
2401 * @irq: Interrupt line to free
2402 * @act: irqaction for the interrupt
2403 *
2404 * Used to remove interrupts statically set up by the early boot process.
2405 */
2406 void remove_percpu_irq(unsigned int irq, struct irqaction *act)
2407 {
2408 struct irq_desc *desc = irq_to_desc(irq);
2409
2410 if (desc && irq_settings_is_per_cpu_devid(desc))
2411 __free_percpu_irq(irq, act->percpu_dev_id);
2412 }
2413
2414 /**
2415 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
2416 * @irq: Interrupt line to free
2417 * @dev_id: Device identity to free
2418 *
2419 * Remove a percpu interrupt handler. The handler is removed, but
2420 * the interrupt line is not disabled. This must be done on each
2421 * CPU before calling this function. The function does not return
2422 * until any executing interrupts for this IRQ have completed.
2423 *
2424 * This function must not be called from interrupt context.
2425 */
2426 void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2427 {
2428 struct irq_desc *desc = irq_to_desc(irq);
2429
2430 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2431 return;
2432
2433 chip_bus_lock(desc);
2434 kfree(__free_percpu_irq(irq, dev_id));
2435 chip_bus_sync_unlock(desc);
2436 }
2437 EXPORT_SYMBOL_GPL(free_percpu_irq);
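/*
 * Illustrative sketch (not part of this file): as noted above, the line
 * has to be disabled on every CPU before it is freed; foo_irq and
 * foo_pcpu are hypothetical:
 *
 *	static void foo_disable_on_cpu(void *info)
 *	{
 *		disable_percpu_irq(foo_irq);
 *	}
 *
 *	on_each_cpu(foo_disable_on_cpu, NULL, 1);
 *	free_percpu_irq(foo_irq, foo_pcpu);
 *	free_percpu(foo_pcpu);
 */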
2438
2439 void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
2440 {
2441 struct irq_desc *desc = irq_to_desc(irq);
2442
2443 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2444 return;
2445
2446 if (WARN_ON(!(desc->istate & IRQS_NMI)))
2447 return;
2448
2449 kfree(__free_percpu_irq(irq, dev_id));
2450 }
2451
2452 /**
2453 * setup_percpu_irq - setup a per-cpu interrupt
2454 * @irq: Interrupt line to setup
2455 * @act: irqaction for the interrupt
2456 *
2457 * Used to statically set up per-cpu interrupts in the early boot process.
2458 */
2459 int setup_percpu_irq(unsigned int irq, struct irqaction *act)
2460 {
2461 struct irq_desc *desc = irq_to_desc(irq);
2462 int retval;
2463
2464 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2465 return -EINVAL;
2466
2467 retval = irq_chip_pm_get(&desc->irq_data);
2468 if (retval < 0)
2469 return retval;
2470
2471 retval = __setup_irq(irq, desc, act);
2472
2473 if (retval)
2474 irq_chip_pm_put(&desc->irq_data);
2475
2476 return retval;
2477 }
2478
2479 /**
2480 * __request_percpu_irq - allocate a percpu interrupt line
2481 * @irq: Interrupt line to allocate
2482 * @handler: Function to be called when the IRQ occurs.
2483 * @flags: Interrupt type flags (IRQF_TIMER only)
2484 * @devname: An ascii name for the claiming device
2485 * @dev_id: A percpu cookie passed back to the handler function
2486 *
2487 * This call allocates interrupt resources and enables the
2488 * interrupt on the local CPU. If the interrupt is supposed to be
2489 * enabled on other CPUs, it has to be done on each CPU using
2490 * enable_percpu_irq().
2491 *
2492 * Dev_id must be globally unique. It is a per-cpu variable, and
2493 * the handler gets called with the interrupted CPU's instance of
2494 * that variable.
2495 */
2496 int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2497 unsigned long flags, const char *devname,
2498 void __percpu *dev_id)
2499 {
2500 struct irqaction *action;
2501 struct irq_desc *desc;
2502 int retval;
2503
2504 if (!dev_id)
2505 return -EINVAL;
2506
2507 desc = irq_to_desc(irq);
2508 if (!desc || !irq_settings_can_request(desc) ||
2509 !irq_settings_is_per_cpu_devid(desc))
2510 return -EINVAL;
2511
2512 if (flags && flags != IRQF_TIMER)
2513 return -EINVAL;
2514
2515 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2516 if (!action)
2517 return -ENOMEM;
2518
2519 action->handler = handler;
2520 action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
2521 action->name = devname;
2522 action->percpu_dev_id = dev_id;
2523
2524 retval = irq_chip_pm_get(&desc->irq_data);
2525 if (retval < 0) {
2526 kfree(action);
2527 return retval;
2528 }
2529
2530 retval = __setup_irq(irq, desc, action);
2531
2532 if (retval) {
2533 irq_chip_pm_put(&desc->irq_data);
2534 kfree(action);
2535 }
2536
2537 return retval;
2538 }
2539 EXPORT_SYMBOL_GPL(__request_percpu_irq);
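/*
 * Illustrative sketch (not part of this file): a typical user allocates a
 * per-cpu cookie, requests the line through the request_percpu_irq()
 * wrapper and then enables it on each CPU; the foo_* names are
 * hypothetical:
 *
 *	static irqreturn_t foo_percpu_handler(int irq, void *dev_id)
 *	{
 *		struct foo_pcpu *p = dev_id;	// this CPU's instance
 *
 *		foo_handle_local_event(p);
 *		return IRQ_HANDLED;
 *	}
 *
 *	struct foo_pcpu __percpu *pcpu = alloc_percpu(struct foo_pcpu);
 *
 *	err = request_percpu_irq(irq, foo_percpu_handler, "foo", pcpu);
 *	if (!err)
 *		enable_percpu_irq(irq, IRQ_TYPE_NONE);	// on each CPU
 */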
2540
2541 /**
2542 * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
2543 * @irq: Interrupt line to allocate
2544 * @handler: Function to be called when the IRQ occurs.
2545 * @name: An ascii name for the claiming device
2546 * @dev_id: A percpu cookie passed back to the handler function
2547 *
2548 * This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs
2549 * have to be set up on each CPU by calling prepare_percpu_nmi() before
2550 * being enabled on the same CPU by using enable_percpu_nmi().
2551 *
2552 * Dev_id must be globally unique. It is a per-cpu variable, and
2553 * the handler gets called with the interrupted CPU's instance of
2554 * that variable.
2555 *
2556 * Interrupt lines requested for NMI delivery must have their auto-enable
2557 * setting disabled.
2558 *
2559 * If the interrupt line cannot be used to deliver NMIs, the function
2560 * will fail and return a negative value.
2561 */
2562 int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
2563 const char *name, void __percpu *dev_id)
2564 {
2565 struct irqaction *action;
2566 struct irq_desc *desc;
2567 unsigned long flags;
2568 int retval;
2569
2570 if (!handler)
2571 return -EINVAL;
2572
2573 desc = irq_to_desc(irq);
2574
2575 if (!desc || !irq_settings_can_request(desc) ||
2576 !irq_settings_is_per_cpu_devid(desc) ||
2577 irq_settings_can_autoenable(desc) ||
2578 !irq_supports_nmi(desc))
2579 return -EINVAL;
2580
2581 /* The line cannot already be NMI */
2582 if (desc->istate & IRQS_NMI)
2583 return -EINVAL;
2584
2585 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2586 if (!action)
2587 return -ENOMEM;
2588
2589 action->handler = handler;
2590 action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
2591 | IRQF_NOBALANCING;
2592 action->name = name;
2593 action->percpu_dev_id = dev_id;
2594
2595 retval = irq_chip_pm_get(&desc->irq_data);
2596 if (retval < 0)
2597 goto err_out;
2598
2599 retval = __setup_irq(irq, desc, action);
2600 if (retval)
2601 goto err_irq_setup;
2602
2603 raw_spin_lock_irqsave(&desc->lock, flags);
2604 desc->istate |= IRQS_NMI;
2605 raw_spin_unlock_irqrestore(&desc->lock, flags);
2606
2607 return 0;
2608
2609 err_irq_setup:
2610 irq_chip_pm_put(&desc->irq_data);
2611 err_out:
2612 kfree(action);
2613
2614 return retval;
2615 }
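/*
 * Illustrative sketch (not part of this file): the usual life cycle of a
 * per-cpu NMI is a single request followed by per-CPU prepare/enable and
 * disable/teardown calls from non-preemptible context; foo_nmi_handler
 * and foo_pcpu are hypothetical:
 *
 *	err = request_percpu_nmi(irq, foo_nmi_handler, "foo-nmi", foo_pcpu);
 *
 *	// then, on each CPU that should receive the NMI:
 *	err = prepare_percpu_nmi(irq);
 *	if (!err)
 *		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
 *
 *	// teardown, again per CPU:
 *	disable_percpu_nmi(irq);
 *	teardown_percpu_nmi(irq);
 */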
2616
2617 /**
2618 * prepare_percpu_nmi - performs CPU local setup for NMI delivery
2619 * @irq: Interrupt line to prepare for NMI delivery
2620 *
2621 * This call prepares an interrupt line to deliver NMI on the current CPU,
2622 * before that interrupt line gets enabled with enable_percpu_nmi().
2623 *
2624 * As a CPU local operation, this should be called from non-preemptible
2625 * context.
2626 *
2627 * If the interrupt line cannot be used to deliver NMIs, the function
2628 * will fail and return a negative value.
2629 */
2630 int prepare_percpu_nmi(unsigned int irq)
2631 {
2632 unsigned long flags;
2633 struct irq_desc *desc;
2634 int ret = 0;
2635
2636 WARN_ON(preemptible());
2637
2638 desc = irq_get_desc_lock(irq, &flags,
2639 IRQ_GET_DESC_CHECK_PERCPU);
2640 if (!desc)
2641 return -EINVAL;
2642
2643 if (WARN(!(desc->istate & IRQS_NMI),
2644 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
2645 irq)) {
2646 ret = -EINVAL;
2647 goto out;
2648 }
2649
2650 ret = irq_nmi_setup(desc);
2651 if (ret) {
2652 pr_err("Failed to setup NMI delivery: irq %u\n", irq);
2653 goto out;
2654 }
2655
2656 out:
2657 irq_put_desc_unlock(desc, flags);
2658 return ret;
2659 }
2660
2661 /**
2662 * teardown_percpu_nmi - undoes NMI setup of IRQ line
2663 * @irq: Interrupt line from which CPU local NMI configuration should be
2664 * removed
2665 *
2666 * This call undoes the setup done by prepare_percpu_nmi().
2667 *
2668 * IRQ line should not be enabled for the current CPU.
2669 *
2670 * As a CPU local operation, this should be called from non-preemptible
2671 * context.
2672 */
2673 void teardown_percpu_nmi(unsigned int irq)
2674 {
2675 unsigned long flags;
2676 struct irq_desc *desc;
2677
2678 WARN_ON(preemptible());
2679
2680 desc = irq_get_desc_lock(irq, &flags,
2681 IRQ_GET_DESC_CHECK_PERCPU);
2682 if (!desc)
2683 return;
2684
2685 if (WARN_ON(!(desc->istate & IRQS_NMI)))
2686 goto out;
2687
2688 irq_nmi_teardown(desc);
2689 out:
2690 irq_put_desc_unlock(desc, flags);
2691 }
2692
2693 int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
2694 bool *state)
2695 {
2696 struct irq_chip *chip;
2697 int err = -EINVAL;
2698
2699 do {
2700 chip = irq_data_get_irq_chip(data);
2701 if (WARN_ON_ONCE(!chip))
2702 return -ENODEV;
2703 if (chip->irq_get_irqchip_state)
2704 break;
2705 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2706 data = data->parent_data;
2707 #else
2708 data = NULL;
2709 #endif
2710 } while (data);
2711
2712 if (data)
2713 err = chip->irq_get_irqchip_state(data, which, state);
2714 return err;
2715 }
2716
2717 /**
2718 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
2719 * @irq: Interrupt line that is forwarded to a VM
2720 * @which: One of IRQCHIP_STATE_* the caller wants to know about
2721 * @state: a pointer to a boolean where the state is to be stored
2722 *
2723 * This call snapshots the internal irqchip state of an
2724 * interrupt, returning into @state the bit corresponding to
2725 * state @which.
2726 *
2727 * This function should be called with preemption disabled if the
2728 * interrupt controller has per-cpu registers.
2729 */
2730 int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2731 bool *state)
2732 {
2733 struct irq_desc *desc;
2734 struct irq_data *data;
2735 unsigned long flags;
2736 int err = -EINVAL;
2737
2738 desc = irq_get_desc_buslock(irq, &flags, 0);
2739 if (!desc)
2740 return err;
2741
2742 data = irq_desc_get_irq_data(desc);
2743
2744 err = __irq_get_irqchip_state(data, which, state);
2745
2746 irq_put_desc_busunlock(desc, flags);
2747 return err;
2748 }
2749 EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
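/*
 * Illustrative sketch (not part of this file): a VFIO/KVM style user that
 * forwards an interrupt to a guest could snapshot the pending state like
 * this before saving or migrating the VM:
 *
 *	bool pending;
 *
 *	err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
 */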
2750
2751 /**
2752 * irq_set_irqchip_state - set the state of a forwarded interrupt.
2753 * @irq: Interrupt line that is forwarded to a VM
2754 * @which: State to be restored (one of IRQCHIP_STATE_*)
2755 * @val: Value corresponding to @which
2756 *
2757 * This call sets the internal irqchip state of an interrupt,
2758 * depending on the value of @which.
2759 *
2760 * This function should be called with preemption disabled if the
2761 * interrupt controller has per-cpu registers.
2762 */
2763 int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2764 bool val)
2765 {
2766 struct irq_desc *desc;
2767 struct irq_data *data;
2768 struct irq_chip *chip;
2769 unsigned long flags;
2770 int err = -EINVAL;
2771
2772 desc = irq_get_desc_buslock(irq, &flags, 0);
2773 if (!desc)
2774 return err;
2775
2776 data = irq_desc_get_irq_data(desc);
2777
2778 do {
2779 chip = irq_data_get_irq_chip(data);
2780 if (WARN_ON_ONCE(!chip)) {
2781 err = -ENODEV;
2782 goto out_unlock;
2783 }
2784 if (chip->irq_set_irqchip_state)
2785 break;
2786 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2787 data = data->parent_data;
2788 #else
2789 data = NULL;
2790 #endif
2791 } while (data);
2792
2793 if (data)
2794 err = chip->irq_set_irqchip_state(data, which, val);
2795
2796 out_unlock:
2797 irq_put_desc_busunlock(desc, flags);
2798 return err;
2799 }
2800 EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
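/*
 * Illustrative sketch (not part of this file): the counterpart to the
 * snapshot above, replaying a previously saved pending bit when the
 * interrupt is handed back:
 *
 *	if (pending)
 *		err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true);
 */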
2801