// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip based
 * architectures. Detailed information is available in
 * Documentation/core-api/genericirq.rst
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>
#include <linux/wakeup_reason.h>

#include <trace/events/irq.h>

#include "internals.h"

static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
	return IRQ_NONE;
}

/*
 * Chained handlers should never call an action on their IRQ. This default
 * action will emit a warning if such a thing happens.
 */
struct irqaction chained_action = {
	.handler = bad_chained_irq,
};

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq:	irq number
 * @chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);
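
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * driver's irqdomain ->map() callback might bind its chip and chip data
 * before the flow handler is installed:
 *
 *	static int foo_irq_map(struct irq_domain *d, unsigned int virq,
 *			       irq_hw_number_t hw)
 *	{
 *		irq_set_chip(virq, &foo_irq_chip);
 *		irq_set_chip_data(virq, d->host_data);
 *		return 0;
 *	}
 *
 * foo_irq_chip and foo_irq_map are made-up names for illustration.
 */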

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq:	irq number
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	ret = __irq_set_trigger(desc, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
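
/*
 * Illustrative sketch (not part of the original file): board or driver
 * code might select a trigger explicitly:
 *
 *	ret = irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
 *	if (ret)
 *		pr_err("failed to set trigger for IRQ %u\n", irq);
 *
 * Most drivers instead pass IRQF_TRIGGER_* flags to request_irq(), which
 * reaches the same __irq_set_trigger() path.
 */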

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to interrupt specific data
 *
 * Set the hardware irq controller data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 * @irq_base:	Interrupt number base
 * @irq_offset:	Interrupt number offset
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq:	Interrupt number
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_clr_started(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
}

static void irq_state_set_started(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
}

enum {
	IRQ_STARTUP_NORMAL,
	IRQ_STARTUP_MANAGED,
	IRQ_STARTUP_ABORT,
};

#ifdef CONFIG_SMP
static int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return IRQ_STARTUP_NORMAL;

	irqd_clr_managed_shutdown(d);

	if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * Catch code which fiddles with enable_irq() on a managed
		 * and potentially shutdown IRQ. Chained interrupt
		 * installment or irq auto probing should not happen on
		 * managed irqs either.
		 */
		if (WARN_ON_ONCE(force))
			return IRQ_STARTUP_ABORT;
		/*
		 * The interrupt was requested, but there is no online CPU
		 * in its affinity mask. Put it into managed shutdown
		 * state and let the cpu hotplug mechanism start it up once
		 * a CPU in the mask becomes available.
		 */
		return IRQ_STARTUP_ABORT;
	}
	/*
	 * Managed interrupts have reserved resources, so this should not
	 * happen.
	 */
	if (WARN_ON(irq_domain_activate_irq(d, false)))
		return IRQ_STARTUP_ABORT;
	return IRQ_STARTUP_MANAGED;
}
#else
static __always_inline int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	return IRQ_STARTUP_NORMAL;
}
#endif

static int __irq_startup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	int ret = 0;

	/* Warn if this interrupt is not activated but try nevertheless */
	WARN_ON_ONCE(!irqd_is_activated(d));

	if (d->chip->irq_startup) {
		ret = d->chip->irq_startup(d);
		irq_state_clr_disabled(desc);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	irq_state_set_started(desc);
	return ret;
}

int irq_startup(struct irq_desc *desc, bool resend, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct cpumask *aff = irq_data_get_affinity_mask(d);
	int ret = 0;

	desc->depth = 0;

	if (irqd_is_started(d)) {
		irq_enable(desc);
	} else {
		switch (__irq_startup_managed(desc, aff, force)) {
		case IRQ_STARTUP_NORMAL:
			if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)
				irq_setup_affinity(desc);
			ret = __irq_startup(desc);
			if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP))
				irq_setup_affinity(desc);
			break;
		case IRQ_STARTUP_MANAGED:
			irq_do_set_affinity(d, aff, false);
			ret = __irq_startup(desc);
			break;
		case IRQ_STARTUP_ABORT:
			irqd_set_managed_shutdown(d);
			return 0;
		}
	}
	if (resend)
		check_irq_resend(desc);

	return ret;
}

int irq_activate(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return irq_domain_activate_irq(d, false);
	return 0;
}

int irq_activate_and_startup(struct irq_desc *desc, bool resend)
{
	if (WARN_ON(irq_activate(desc)))
		return 0;
	return irq_startup(desc, resend, IRQ_START_FORCE);
}

static void __irq_disable(struct irq_desc *desc, bool mask);

void irq_shutdown(struct irq_desc *desc)
{
	if (irqd_is_started(&desc->irq_data)) {
		desc->depth = 1;
		if (desc->irq_data.chip->irq_shutdown) {
			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
			irq_state_set_disabled(desc);
			irq_state_set_masked(desc);
		} else {
			__irq_disable(desc, true);
		}
		irq_state_clr_started(desc);
	}
}

void irq_shutdown_and_deactivate(struct irq_desc *desc)
{
	irq_shutdown(desc);
	/*
	 * This must be called even if the interrupt was never started up,
	 * because the activation can happen before the interrupt is
	 * available for request/startup. It has its own state tracking so
	 * it's safe to call it unconditionally.
	 */
	irq_domain_deactivate_irq(&desc->irq_data);
}

void irq_enable(struct irq_desc *desc)
{
	if (!irqd_irq_disabled(&desc->irq_data)) {
		unmask_irq(desc);
	} else {
		irq_state_clr_disabled(desc);
		if (desc->irq_data.chip->irq_enable) {
			desc->irq_data.chip->irq_enable(&desc->irq_data);
			irq_state_clr_masked(desc);
		} else {
			unmask_irq(desc);
		}
	}
}

static void __irq_disable(struct irq_desc *desc, bool mask)
{
	if (irqd_irq_disabled(&desc->irq_data)) {
		if (mask)
			mask_irq(desc);
	} else {
		irq_state_set_disabled(desc);
		if (desc->irq_data.chip->irq_disable) {
			desc->irq_data.chip->irq_disable(&desc->irq_data);
			irq_state_set_masked(desc);
		} else if (mask) {
			mask_irq(desc);
		}
	}
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
	__irq_disable(desc, irq_settings_disable_unlazy(desc));
}
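
/*
 * Illustrative sketch (not part of the original file): a driver whose
 * device cannot silence the interrupt source itself can opt out of the
 * lazy disable optimization described above:
 *
 *	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
 *
 * Subsequent disable_irq[_nosync]() calls then mask the line at the
 * hardware level immediately.
 */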

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack) {
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
		irq_state_set_masked(desc);
	} else {
		mask_irq(desc);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
}

void mask_irq(struct irq_desc *desc)
{
	if (irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (!irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	unmask_irq(desc);
}

/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq:	the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = IRQ_NONE;
	for_each_action_of_desc(desc, action)
		action_ret |= action->thread_fn(action->irq, action->dev_id);

	if (!noirqdebug)
		note_interrupt(desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
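
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * irq-chip driver behind a slow bus (e.g. I2C) might demultiplex child
 * interrupts from its threaded handler, where sleeping is allowed:
 *
 *	static irqreturn_t foo_irq_thread(int irq, void *data)
 *	{
 *		struct foo_chip *chip = data;
 *		unsigned long pending = foo_read_status(chip); // bus access
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, chip->nr_irqs)
 *			handle_nested_irq(irq_find_mapping(chip->domain, bit));
 *		return IRQ_HANDLED;
 *	}
 *
 * foo_chip and foo_read_status() are made-up names for illustration.
 */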

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask)) {
#ifdef CONFIG_PM_SLEEP
		if (unlikely(desc->no_suspend_depth &&
			     irqd_is_wakeup_set(&desc->irq_data))) {
			unsigned int irq = irq_desc_get_irq(desc);
			const char *name = "(unnamed)";

			if (desc->action && desc->action->name)
				name = desc->action->name;

			log_abnormal_wakeup_reason("misconfigured IRQ %u %s",
						   irq, name);
		}
#endif
		return true;
	}

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @desc:	the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
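
/*
 * Illustrative sketch (not part of the original file): a demultiplexer
 * typically installs this flow for its child interrupts and forwards to
 * them from the parent handler:
 *
 *	irq_set_chip_and_handler(child_irq, &dummy_irq_chip,
 *				 handle_simple_irq);
 *	...
 *	generic_handle_irq(child_irq);	// from the demux handler
 */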

/**
 * handle_untracked_irq - Simple and software-decoded IRQs.
 * @desc:	the interrupt description structure for this irq
 *
 * Untracked interrupts are sent from a demultiplexing interrupt
 * handler when the demultiplexer does not know which device in its
 * multiplexed irq domain generated the interrupt. IRQs handled
 * through here are not subjected to stats tracking, randomness, or
 * spurious interrupt detection.
 *
 * Note: Like handle_simple_irq, the caller is expected to handle
 * the ack, clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
	unsigned int flags = 0;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	__handle_irq_event_percpu(desc, &flags);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 * handle_level_irq - Level type irq handler
 * @desc:	the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and
 * unmasking it after the associated handler has acknowledged the
 * device, so the interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
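
/*
 * Illustrative sketch (not part of the original file): an interrupt
 * controller driver would typically bind this flow together with its
 * chip, e.g.:
 *
 *	irq_set_chip_and_handler(virq, &foo_irq_chip, handle_level_irq);
 *
 * The flow relies on the chip's mask/unmask (and optionally mask_ack)
 * callbacks to keep the level-asserted line quiet while it is handled.
 * foo_irq_chip is a made-up name for illustration.
 */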

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @desc:	the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);

/**
 * handle_fasteoi_nmi - irq handler for NMI interrupt lines
 * @desc:	the interrupt description structure for this irq
 *
 * A simple NMI-safe handler, considering the restrictions
 * from request_nmi.
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void handle_fasteoi_nmi(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	__kstat_incr_irqs_this_cpu(desc);

	trace_irq_handler_entry(irq, action);
	/*
	 * NMIs cannot be shared, there is only one action.
	 */
	res = action->handler(irq, action->dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);

/**
 * handle_edge_irq - edge type IRQ handler
 * @desc:	the interrupt description structure for this irq
 *
 * Interrupts occur on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be reenabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires re-enabling the interrupt inside
 * the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it's disabled or no action is available, then mask it and
	 * get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @desc:	the interrupt description structure for this irq
 *
 * Similar to the above handle_edge_irq, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it's disabled or no action is available, then mark it
	 * pending and get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	/*
	 * PER CPU interrupts are not serialized. Do not touch
	 * desc->tot_count.
	 */
	__kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	/*
	 * PER CPU interrupts are not serialized. Do not touch
	 * desc->tot_count.
	 */
	__kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	if (likely(action)) {
		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
		trace_irq_handler_exit(irq, action, res);
	} else {
		unsigned int cpu = smp_processor_id();
		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);

		if (enabled)
			irq_percpu_disable(desc, cpu);

		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
			    enabled ? " and unmasked" : "", irq, cpu);
	}

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
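
/*
 * Illustrative sketch (not part of the original file): this flow pairs
 * with request_percpu_irq(), where the cookie is a percpu pointer:
 *
 *	static DEFINE_PER_CPU(struct foo_ctx, foo_ctx);	// made-up type
 *
 *	err = request_percpu_irq(irq, foo_handler, "foo", &foo_ctx);
 *	...
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);	// on each target CPU
 */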

/**
 * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
 *				     dev ids
 * @desc:	the interrupt description structure for this irq
 *
 * Similar to handle_fasteoi_nmi, but handling the dev_id cookie
 * as a percpu pointer.
 */
void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	__kstat_incr_irqs_this_cpu(desc);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
		     int is_chained, const char *name)
{
	if (!handle) {
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there. Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt is supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				return;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			return;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		if (is_chained)
			desc->action = NULL;
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		unsigned int type = irqd_get_trigger_type(&desc->irq_data);

		/*
		 * We're about to start this interrupt immediately,
		 * hence the need to set the trigger configuration.
		 * But the .set_type callback may have overridden the
		 * flow handler, ignoring that we're dealing with a
		 * chained interrupt. Reset it immediately because we
		 * do know better.
		 */
		if (type != IRQ_TYPE_NONE) {
			__irq_set_trigger(desc, type);
			desc->handle_irq = handle;
		}

		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		desc->action = &chained_action;
		irq_activate_and_startup(desc, IRQ_RESEND);
	}
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	__irq_do_set_handler(desc, handle, is_chained, name);
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
				 void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	desc->irq_common_data.handler_data = data;
	__irq_do_set_handler(desc, handle, 1, NULL);

	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
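
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * secondary controller hangs off a parent line with a chained flow
 * handler; chained IRQs never see a struct irqaction (see
 * bad_chained_irq() above):
 *
 *	static void foo_demux_handler(struct irq_desc *desc)
 *	{
 *		struct foo_chip *chip = irq_desc_get_handler_data(desc);
 *		struct irq_chip *parent = irq_desc_get_chip(desc);
 *
 *		chained_irq_enter(parent, desc);
 *		foo_handle_pending(chip);	// fan out to child irqs
 *		chained_irq_exit(parent, desc);
 *	}
 *
 *	irq_set_chained_handler_and_data(parent_irq, foo_demux_handler,
 *					 chip);
 *
 * foo_chip and foo_handle_pending() are made-up names for illustration.
 */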

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags, trigger, tmp;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;

	/*
	 * Warn when a driver sets the no autoenable flag on an already
	 * active interrupt.
	 */
	WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));

	irq_settings_clr_and_set(desc, clr, set);

	trigger = irqd_get_trigger_type(&desc->irq_data);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	tmp = irq_settings_get_trigger_mask(desc);
	if (tmp != IRQ_TYPE_NONE)
		trigger = tmp;

	irqd_set(&desc->irq_data, trigger);

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
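
/*
 * Illustrative sketch (not part of the original file): a driver that
 * reserves a line for internal use might hide it from request_irq()
 * users and from autoprobing:
 *
 *	irq_modify_status(irq, 0, IRQ_NOREQUEST | IRQ_NOPROBE);
 *
 * irq_set_status_flags() and irq_clear_status_flags() are thin wrappers
 * around this function.
 */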

/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY

#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
/**
 * handle_fasteoi_ack_irq - irq handler for edge hierarchy
 *			    stacked on transparent controllers
 *
 * @desc:	the interrupt description structure for this irq
 *
 * Like handle_fasteoi_irq(), but for use with hierarchy where
 * the irq_chip also needs to have its ->irq_ack() function
 * called.
 */
void handle_fasteoi_ack_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);

/**
 * handle_fasteoi_mask_irq - irq handler for level hierarchy
 *			     stacked on transparent controllers
 *
 * @desc:	the interrupt description structure for this irq
 *
 * Like handle_fasteoi_irq(), but for use with hierarchy where
 * the irq_chip also needs to have its ->irq_mask_ack() function
 * called.
 */
void handle_fasteoi_mask_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);

#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */

/**
 * irq_chip_set_parent_state - set the state of a parent interrupt.
 *
 * @data:	Pointer to interrupt specific data
 * @which:	State to be restored (one of IRQCHIP_STATE_*)
 * @val:	Value corresponding to @which
 *
 * Conditional success, if the underlying irqchip does not implement it.
 */
int irq_chip_set_parent_state(struct irq_data *data,
			      enum irqchip_irq_state which,
			      bool val)
{
	data = data->parent_data;

	if (!data || !data->chip->irq_set_irqchip_state)
		return 0;

	return data->chip->irq_set_irqchip_state(data, which, val);
}
EXPORT_SYMBOL_GPL(irq_chip_set_parent_state);

/**
 * irq_chip_get_parent_state - get the state of a parent interrupt.
 *
 * @data:	Pointer to interrupt specific data
 * @which:	one of IRQCHIP_STATE_* the caller wants to know
 * @state:	a pointer to a boolean where the state is to be stored
 *
 * Conditional success, if the underlying irqchip does not implement it.
 */
int irq_chip_get_parent_state(struct irq_data *data,
			      enum irqchip_irq_state which,
			      bool *state)
{
	data = data->parent_data;

	if (!data || !data->chip->irq_get_irqchip_state)
		return 0;

	return data->chip->irq_get_irqchip_state(data, which, state);
}
EXPORT_SYMBOL_GPL(irq_chip_get_parent_state);

/**
 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
 *			    NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_enable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_enable)
		data->chip->irq_enable(data);
	else
		data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_enable_parent);

/**
 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
 *			     NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_disable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_disable)
		data->chip->irq_disable(data);
	else
		data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_disable_parent);

/**
 * irq_chip_ack_parent - Acknowledge the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_ack_parent);

/**
 * irq_chip_mask_parent - Mask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_mask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_parent);

/**
 * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_mask_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_ack_parent);

/**
 * irq_chip_unmask_parent - Unmask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_unmask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);

/**
 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_eoi_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_eoi(data);
}
EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);

/**
 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @dest:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_affinity_parent(struct irq_data *data,
				 const struct cpumask *dest, bool force)
{
	data = data->parent_data;
	if (data->chip->irq_set_affinity)
		return data->chip->irq_set_affinity(data, dest, force);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);
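
/*
 * Illustrative sketch (not part of the original file): a chip in a
 * hierarchical domain can forward most operations to its parent with
 * the helpers above:
 *
 *	static struct irq_chip foo_msi_chip = {		// made-up chip
 *		.name			= "foo-msi",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_eoi		= irq_chip_eoi_parent,
 *		.irq_set_affinity	= irq_chip_set_affinity_parent,
 *	};
 */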

/**
 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
{
	data = data->parent_data;

	if (data->chip->irq_set_type)
		return data->chip->irq_set_type(data, type);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);

/**
 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
 * @data:	Pointer to interrupt specific data
 *
 * Iterate through the domain hierarchy of the interrupt and check
 * whether a hw retrigger function exists. If yes, invoke it.
 */
int irq_chip_retrigger_hierarchy(struct irq_data *data)
{
	for (data = data->parent_data; data; data = data->parent_data)
		if (data->chip && data->chip->irq_retrigger)
			return data->chip->irq_retrigger(data);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_chip_retrigger_hierarchy);

/**
 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @vcpu_info:	The vcpu affinity information
 */
int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
{
	data = data->parent_data;
	if (data->chip->irq_set_vcpu_affinity)
		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_vcpu_affinity_parent);

/**
 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @on:		Whether to set or reset the wake-up capability of this irq
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
	data = data->parent_data;

	if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (data->chip->irq_set_wake)
		return data->chip->irq_set_wake(data, on);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent);

/**
 * irq_chip_request_resources_parent - Request resources on the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
int irq_chip_request_resources_parent(struct irq_data *data)
{
	data = data->parent_data;

	if (data->chip->irq_request_resources)
		return data->chip->irq_request_resources(data);

	/* no error on missing optional irq_chip::irq_request_resources */
	return 0;
}
EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent);

/**
 * irq_chip_release_resources_parent - Release resources on the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_release_resources_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_release_resources)
		data->chip->irq_release_resources(data);
}
EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent);
#endif

/**
 * irq_chip_compose_msi_msg - Compose an MSI message for an irq chip
 * @data:	Pointer to interrupt specific data
 * @msg:	Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For non-hierarchical
 * domains we use the top level chip.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_data *pos = NULL;

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	for (; data; data = data->parent_data)
#endif
		if (data->chip && data->chip->irq_compose_msi_msg)
			pos = data;
	if (!pos)
		return -ENOSYS;

	pos->chip->irq_compose_msi_msg(pos, msg);

	return 0;
}

/**
 * irq_chip_pm_get - Enable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Enable the power to the IRQ chip referenced by the interrupt data
 * structure.
 */
int irq_chip_pm_get(struct irq_data *data)
{
	int retval;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
		retval = pm_runtime_get_sync(data->chip->parent_device);
		if (retval < 0) {
			pm_runtime_put_noidle(data->chip->parent_device);
			return retval;
		}
	}

	return 0;
}

/**
 * irq_chip_pm_put - Disable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Disable the power to the IRQ chip referenced by the interrupt data
 * structure. Note that power will only be disabled once this
 * function has been called for all IRQs that have called irq_chip_pm_get().
 */
int irq_chip_pm_put(struct irq_data *data)
{
	int retval = 0;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
		retval = pm_runtime_put(data->chip->parent_device);

	return (retval < 0) ? retval : 0;
}