1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
4  * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
5  *
6  * This file contains the core interrupt handling code, for irq-chip based
7  * architectures. Detailed information is available in
8  * Documentation/core-api/genericirq.rst
9  */
10 
11 #include <linux/irq.h>
12 #include <linux/msi.h>
13 #include <linux/module.h>
14 #include <linux/interrupt.h>
15 #include <linux/kernel_stat.h>
16 #include <linux/irqdomain.h>
17 #include <linux/wakeup_reason.h>
18 
19 #include <trace/events/irq.h>
20 
21 #include "internals.h"
22 
23 static irqreturn_t bad_chained_irq(int irq, void *dev_id)
24 {
25 	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
26 	return IRQ_NONE;
27 }
28 
29 /*
30  * Chained handlers should never call the action on their IRQ. This default
31  * action will emit a warning if that happens.
32  */
33 struct irqaction chained_action = {
34 	.handler = bad_chained_irq,
35 };
36 
37 /**
38  *	irq_set_chip - set the irq chip for an irq
39  *	@irq:	irq number
40  *	@chip:	pointer to irq chip description structure
41  */
42 int irq_set_chip(unsigned int irq, const struct irq_chip *chip)
43 {
44 	unsigned long flags;
45 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
46 
47 	if (!desc)
48 		return -EINVAL;
49 
50 	desc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip);
51 	irq_put_desc_unlock(desc, flags);
52 	/*
53 	 * For !CONFIG_SPARSE_IRQ make the irq show up in
54 	 * allocated_irqs.
55 	 */
56 	irq_mark_irq(irq);
57 	return 0;
58 }
59 EXPORT_SYMBOL(irq_set_chip);
60 
61 /**
62  *	irq_set_irq_type - set the irq trigger type for an irq
63  *	@irq:	irq number
64  *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
65  */
66 int irq_set_irq_type(unsigned int irq, unsigned int type)
67 {
68 	unsigned long flags;
69 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
70 	int ret = 0;
71 
72 	if (!desc)
73 		return -EINVAL;
74 
75 	ret = __irq_set_trigger(desc, type);
76 	irq_put_desc_busunlock(desc, flags);
77 	return ret;
78 }
79 EXPORT_SYMBOL(irq_set_irq_type);
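
/*
 * A minimal usage sketch, assuming a hypothetical "foo" chip with foo_mask()
 * and foo_unmask() callbacks and an already mapped Linux interrupt @virq:
 *
 *	static struct irq_chip foo_chip = {
 *		.name		= "foo",
 *		.irq_mask	= foo_mask,
 *		.irq_unmask	= foo_unmask,
 *	};
 *
 *	irq_set_chip(virq, &foo_chip);
 *	irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
 */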
80 
81 /**
82  *	irq_set_handler_data - set irq handler data for an irq
83  *	@irq:	Interrupt number
84  *	@data:	Pointer to interrupt specific data
85  *
86  *	Set the hardware irq controller data for an irq
87  */
88 int irq_set_handler_data(unsigned int irq, void *data)
89 {
90 	unsigned long flags;
91 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
92 
93 	if (!desc)
94 		return -EINVAL;
95 	desc->irq_common_data.handler_data = data;
96 	irq_put_desc_unlock(desc, flags);
97 	return 0;
98 }
99 EXPORT_SYMBOL(irq_set_handler_data);
100 
101 /**
102  *	irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
103  *	@irq_base:	Interrupt number base
104  *	@irq_offset:	Interrupt number offset
105  *	@entry:		Pointer to MSI descriptor data
106  *
107  *	Set the MSI descriptor entry for an irq at offset
108  */
109 int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
110 			 struct msi_desc *entry)
111 {
112 	unsigned long flags;
113 	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
114 
115 	if (!desc)
116 		return -EINVAL;
117 	desc->irq_common_data.msi_desc = entry;
118 	if (entry && !irq_offset)
119 		entry->irq = irq_base;
120 	irq_put_desc_unlock(desc, flags);
121 	return 0;
122 }
123 
124 /**
125  *	irq_set_msi_desc - set MSI descriptor data for an irq
126  *	@irq:	Interrupt number
127  *	@entry:	Pointer to MSI descriptor data
128  *
129  *	Set the MSI descriptor entry for an irq
130  */
131 int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
132 {
133 	return irq_set_msi_desc_off(irq, 0, entry);
134 }
135 
136 /**
137  *	irq_set_chip_data - set irq chip data for an irq
138  *	@irq:	Interrupt number
139  *	@data:	Pointer to chip specific data
140  *
141  *	Set the hardware irq chip data for an irq
142  */
143 int irq_set_chip_data(unsigned int irq, void *data)
144 {
145 	unsigned long flags;
146 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
147 
148 	if (!desc)
149 		return -EINVAL;
150 	desc->irq_data.chip_data = data;
151 	irq_put_desc_unlock(desc, flags);
152 	return 0;
153 }
154 EXPORT_SYMBOL(irq_set_chip_data);
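
/*
 * Sketch of typical chip data usage, assuming a hypothetical per-controller
 * state structure foo_priv and register FOO_MASK_REG. The pointer stashed
 * with irq_set_chip_data() is retrieved in the chip callbacks via
 * irq_data_get_irq_chip_data():
 *
 *	irq_set_chip_data(virq, priv);
 *
 *	static void foo_mask(struct irq_data *d)
 *	{
 *		struct foo_priv *priv = irq_data_get_irq_chip_data(d);
 *
 *		writel(BIT(d->hwirq), priv->base + FOO_MASK_REG);
 *	}
 */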
155 
156 struct irq_data *irq_get_irq_data(unsigned int irq)
157 {
158 	struct irq_desc *desc = irq_to_desc(irq);
159 
160 	return desc ? &desc->irq_data : NULL;
161 }
162 EXPORT_SYMBOL_GPL(irq_get_irq_data);
163 
164 static void irq_state_clr_disabled(struct irq_desc *desc)
165 {
166 	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
167 }
168 
169 static void irq_state_clr_masked(struct irq_desc *desc)
170 {
171 	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
172 }
173 
174 static void irq_state_clr_started(struct irq_desc *desc)
175 {
176 	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
177 }
178 
179 static void irq_state_set_started(struct irq_desc *desc)
180 {
181 	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
182 }
183 
184 enum {
185 	IRQ_STARTUP_NORMAL,
186 	IRQ_STARTUP_MANAGED,
187 	IRQ_STARTUP_ABORT,
188 };
189 
190 #ifdef CONFIG_SMP
191 static int
192 __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
193 		      bool force)
194 {
195 	struct irq_data *d = irq_desc_get_irq_data(desc);
196 
197 	if (!irqd_affinity_is_managed(d))
198 		return IRQ_STARTUP_NORMAL;
199 
200 	irqd_clr_managed_shutdown(d);
201 
202 	if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
203 		/*
204 		 * Catch code which fiddles with enable_irq() on a managed
205 		 * and potentially shutdown IRQ. Chained interrupt
206 		 * installment or irq auto probing should not happen on
207 		 * managed irqs either.
208 		 */
209 		if (WARN_ON_ONCE(force))
210 			return IRQ_STARTUP_ABORT;
211 		/*
212 		 * The interrupt was requested, but there is no online CPU
213 		 * in its affinity mask. Put it into managed shutdown
214 		 * state and let the cpu hotplug mechanism start it up once
215 		 * a CPU in the mask becomes available.
216 		 */
217 		return IRQ_STARTUP_ABORT;
218 	}
219 	/*
220 	 * Managed interrupts have reserved resources, so this should not
221 	 * happen.
222 	 */
223 	if (WARN_ON(irq_domain_activate_irq(d, false)))
224 		return IRQ_STARTUP_ABORT;
225 	return IRQ_STARTUP_MANAGED;
226 }
227 #else
228 static __always_inline int
229 __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
230 		      bool force)
231 {
232 	return IRQ_STARTUP_NORMAL;
233 }
234 #endif
235 
236 static int __irq_startup(struct irq_desc *desc)
237 {
238 	struct irq_data *d = irq_desc_get_irq_data(desc);
239 	int ret = 0;
240 
241 	/* Warn if this interrupt is not activated but try nevertheless */
242 	WARN_ON_ONCE(!irqd_is_activated(d));
243 
244 	if (d->chip->irq_startup) {
245 		ret = d->chip->irq_startup(d);
246 		irq_state_clr_disabled(desc);
247 		irq_state_clr_masked(desc);
248 	} else {
249 		irq_enable(desc);
250 	}
251 	irq_state_set_started(desc);
252 	return ret;
253 }
254 
255 int irq_startup(struct irq_desc *desc, bool resend, bool force)
256 {
257 	struct irq_data *d = irq_desc_get_irq_data(desc);
258 	const struct cpumask *aff = irq_data_get_affinity_mask(d);
259 	int ret = 0;
260 
261 	desc->depth = 0;
262 
263 	if (irqd_is_started(d)) {
264 		irq_enable(desc);
265 	} else {
266 		switch (__irq_startup_managed(desc, aff, force)) {
267 		case IRQ_STARTUP_NORMAL:
268 			if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)
269 				irq_setup_affinity(desc);
270 			ret = __irq_startup(desc);
271 			if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP))
272 				irq_setup_affinity(desc);
273 			break;
274 		case IRQ_STARTUP_MANAGED:
275 			irq_do_set_affinity(d, aff, false);
276 			ret = __irq_startup(desc);
277 			break;
278 		case IRQ_STARTUP_ABORT:
279 			irqd_set_managed_shutdown(d);
280 			return 0;
281 		}
282 	}
283 	if (resend)
284 		check_irq_resend(desc, false);
285 
286 	return ret;
287 }
288 
289 int irq_activate(struct irq_desc *desc)
290 {
291 	struct irq_data *d = irq_desc_get_irq_data(desc);
292 
293 	if (!irqd_affinity_is_managed(d))
294 		return irq_domain_activate_irq(d, false);
295 	return 0;
296 }
297 
298 int irq_activate_and_startup(struct irq_desc *desc, bool resend)
299 {
300 	if (WARN_ON(irq_activate(desc)))
301 		return 0;
302 	return irq_startup(desc, resend, IRQ_START_FORCE);
303 }
304 
305 static void __irq_disable(struct irq_desc *desc, bool mask);
306 
307 void irq_shutdown(struct irq_desc *desc)
308 {
309 	if (irqd_is_started(&desc->irq_data)) {
310 		clear_irq_resend(desc);
311 		desc->depth = 1;
312 		if (desc->irq_data.chip->irq_shutdown) {
313 			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
314 			irq_state_set_disabled(desc);
315 			irq_state_set_masked(desc);
316 		} else {
317 			__irq_disable(desc, true);
318 		}
319 		irq_state_clr_started(desc);
320 	}
321 }
322 
323 
324 void irq_shutdown_and_deactivate(struct irq_desc *desc)
325 {
326 	irq_shutdown(desc);
327 	/*
328 	 * This must be called even if the interrupt was never started up,
329 	 * because the activation can happen before the interrupt is
330  * available for request/startup. It has its own state tracking so
331 	 * it's safe to call it unconditionally.
332 	 */
333 	irq_domain_deactivate_irq(&desc->irq_data);
334 }
335 
336 void irq_enable(struct irq_desc *desc)
337 {
338 	if (!irqd_irq_disabled(&desc->irq_data)) {
339 		unmask_irq(desc);
340 	} else {
341 		irq_state_clr_disabled(desc);
342 		if (desc->irq_data.chip->irq_enable) {
343 			desc->irq_data.chip->irq_enable(&desc->irq_data);
344 			irq_state_clr_masked(desc);
345 		} else {
346 			unmask_irq(desc);
347 		}
348 	}
349 }
350 
351 static void __irq_disable(struct irq_desc *desc, bool mask)
352 {
353 	if (irqd_irq_disabled(&desc->irq_data)) {
354 		if (mask)
355 			mask_irq(desc);
356 	} else {
357 		irq_state_set_disabled(desc);
358 		if (desc->irq_data.chip->irq_disable) {
359 			desc->irq_data.chip->irq_disable(&desc->irq_data);
360 			irq_state_set_masked(desc);
361 		} else if (mask) {
362 			mask_irq(desc);
363 		}
364 	}
365 }
366 
367 /**
368  * irq_disable - Mark interrupt disabled
369  * @desc:	irq descriptor which should be disabled
370  *
371  * If the chip does not implement the irq_disable callback, we
372  * use a lazy disable approach. That means we mark the interrupt
373  * disabled, but leave the hardware unmasked. That's an
374  * optimization because we avoid the hardware access for the
375  * common case where no interrupt happens after we marked it
376  * disabled. If an interrupt happens, then the interrupt flow
377  * handler masks the line at the hardware level and marks it
378  * pending.
379  *
380  * If the interrupt chip does not implement the irq_disable callback,
381  * a driver can disable the lazy approach for a particular irq line by
382  * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
383  * be used for devices which cannot disable the interrupt at the
384  * device level under certain circumstances and have to use
385  * disable_irq[_nosync] instead.
386  */
387 void irq_disable(struct irq_desc *desc)
388 {
389 	__irq_disable(desc, irq_settings_disable_unlazy(desc));
390 }
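
/*
 * Sketch of how a driver opts out of the lazy disable behaviour described
 * above for one line:
 *
 *	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
 *	disable_irq(irq);
 *
 * With the flag set, disable_irq() masks the line at the chip level right
 * away instead of leaving it to the flow handler on the next interrupt.
 */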
391 
392 void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
393 {
394 	if (desc->irq_data.chip->irq_enable)
395 		desc->irq_data.chip->irq_enable(&desc->irq_data);
396 	else
397 		desc->irq_data.chip->irq_unmask(&desc->irq_data);
398 	cpumask_set_cpu(cpu, desc->percpu_enabled);
399 }
400 
401 void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
402 {
403 	if (desc->irq_data.chip->irq_disable)
404 		desc->irq_data.chip->irq_disable(&desc->irq_data);
405 	else
406 		desc->irq_data.chip->irq_mask(&desc->irq_data);
407 	cpumask_clear_cpu(cpu, desc->percpu_enabled);
408 }
409 
410 static inline void mask_ack_irq(struct irq_desc *desc)
411 {
412 	if (desc->irq_data.chip->irq_mask_ack) {
413 		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
414 		irq_state_set_masked(desc);
415 	} else {
416 		mask_irq(desc);
417 		if (desc->irq_data.chip->irq_ack)
418 			desc->irq_data.chip->irq_ack(&desc->irq_data);
419 	}
420 }
421 
422 void mask_irq(struct irq_desc *desc)
423 {
424 	if (irqd_irq_masked(&desc->irq_data))
425 		return;
426 
427 	if (desc->irq_data.chip->irq_mask) {
428 		desc->irq_data.chip->irq_mask(&desc->irq_data);
429 		irq_state_set_masked(desc);
430 	}
431 }
432 
433 void unmask_irq(struct irq_desc *desc)
434 {
435 	if (!irqd_irq_masked(&desc->irq_data))
436 		return;
437 
438 	if (desc->irq_data.chip->irq_unmask) {
439 		desc->irq_data.chip->irq_unmask(&desc->irq_data);
440 		irq_state_clr_masked(desc);
441 	}
442 }
443 
444 void unmask_threaded_irq(struct irq_desc *desc)
445 {
446 	struct irq_chip *chip = desc->irq_data.chip;
447 
448 	if (chip->flags & IRQCHIP_EOI_THREADED)
449 		chip->irq_eoi(&desc->irq_data);
450 
451 	unmask_irq(desc);
452 }
453 
454 /*
455  *	handle_nested_irq - Handle a nested irq from an irq thread
456  *	@irq:	the interrupt number
457  *
458  *	Handle interrupts which are nested into a threaded interrupt
459  *	handler. The handler function is called inside the calling
460  *	thread's context.
461  */
462 void handle_nested_irq(unsigned int irq)
463 {
464 	struct irq_desc *desc = irq_to_desc(irq);
465 	struct irqaction *action;
466 	irqreturn_t action_ret;
467 
468 	might_sleep();
469 
470 	raw_spin_lock_irq(&desc->lock);
471 
472 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
473 
474 	action = desc->action;
475 	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
476 		desc->istate |= IRQS_PENDING;
477 		raw_spin_unlock_irq(&desc->lock);
478 		return;
479 	}
480 
481 	kstat_incr_irqs_this_cpu(desc);
482 	atomic_inc(&desc->threads_active);
483 	raw_spin_unlock_irq(&desc->lock);
484 
485 	action_ret = IRQ_NONE;
486 	for_each_action_of_desc(desc, action)
487 		action_ret |= action->thread_fn(action->irq, action->dev_id);
488 
489 	if (!irq_settings_no_debug(desc))
490 		note_interrupt(desc, action_ret);
491 
492 	wake_threads_waitq(desc);
493 }
494 EXPORT_SYMBOL_GPL(handle_nested_irq);
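
/*
 * Typical caller sketch, assuming a hypothetical I2C GPIO expander whose
 * parent interrupt is requested with request_threaded_irq(). The threaded
 * handler reads the (possibly sleeping) status register and dispatches each
 * child interrupt with handle_nested_irq():
 *
 *	static irqreturn_t foo_expander_thread(int irq, void *data)
 *	{
 *		struct foo_expander *chip = data;
 *		unsigned long status = foo_read_status(chip);
 *		int bit;
 *
 *		for_each_set_bit(bit, &status, chip->ngpio)
 *			handle_nested_irq(irq_find_mapping(chip->domain, bit));
 *
 *		return IRQ_HANDLED;
 *	}
 */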
495 
496 static bool irq_check_poll(struct irq_desc *desc)
497 {
498 	if (!(desc->istate & IRQS_POLL_INPROGRESS))
499 		return false;
500 	return irq_wait_for_poll(desc);
501 }
502 
503 static bool irq_may_run(struct irq_desc *desc)
504 {
505 	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;
506 
507 	/*
508 	 * If the interrupt is not in progress and is not an armed
509 	 * wakeup interrupt, proceed.
510 	 */
511 	if (!irqd_has_set(&desc->irq_data, mask)) {
512 #ifdef CONFIG_PM_SLEEP
513 		if (unlikely(desc->no_suspend_depth &&
514 			     irqd_is_wakeup_set(&desc->irq_data))) {
515 			unsigned int irq = irq_desc_get_irq(desc);
516 			const char *name = "(unnamed)";
517 
518 			if (desc->action && desc->action->name)
519 				name = desc->action->name;
520 
521 			log_abnormal_wakeup_reason("misconfigured IRQ %u %s",
522 						   irq, name);
523 		}
524 #endif
525 		return true;
526 	}
527 
528 	/*
529 	 * If the interrupt is an armed wakeup source, mark it pending
530 	 * and suspended, disable it and notify the pm core about the
531 	 * event.
532 	 */
533 	if (irq_pm_check_wakeup(desc))
534 		return false;
535 
536 	/*
537 	 * Handle a potential concurrent poll on a different core.
538 	 */
539 	return irq_check_poll(desc);
540 }
541 
542 /**
543  *	handle_simple_irq - Simple and software-decoded IRQs.
544  *	@desc:	the interrupt description structure for this irq
545  *
546  *	Simple interrupts are either sent from a demultiplexing interrupt
547  *	handler or come from hardware, where no interrupt hardware control
548  *	is necessary.
549  *
550  *	Note: The caller is expected to handle the ack, clear, mask and
551  *	unmask issues if necessary.
552  */
553 void handle_simple_irq(struct irq_desc *desc)
554 {
555 	raw_spin_lock(&desc->lock);
556 
557 	if (!irq_may_run(desc))
558 		goto out_unlock;
559 
560 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
561 
562 	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
563 		desc->istate |= IRQS_PENDING;
564 		goto out_unlock;
565 	}
566 
567 	kstat_incr_irqs_this_cpu(desc);
568 	handle_irq_event(desc);
569 
570 out_unlock:
571 	raw_spin_unlock(&desc->lock);
572 }
573 EXPORT_SYMBOL_GPL(handle_simple_irq);
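
/*
 * A demultiplexing driver typically installs this handler for its child
 * interrupts at domain map time. A minimal sketch, assuming a hypothetical
 * foo_domain_map() callback and using the generic dummy_irq_chip for lines
 * that need no hardware masking:
 *
 *	static int foo_domain_map(struct irq_domain *d, unsigned int virq,
 *				  irq_hw_number_t hwirq)
 *	{
 *		irq_set_chip_and_handler(virq, &dummy_irq_chip,
 *					 handle_simple_irq);
 *		return 0;
 *	}
 */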
574 
575 /**
576  *	handle_untracked_irq - Simple and software-decoded IRQs.
577  *	@desc:	the interrupt description structure for this irq
578  *
579  *	Untracked interrupts are sent from a demultiplexing interrupt
580  *	handler when the demultiplexer does not know which device in its
581  *	multiplexed irq domain generated the interrupt. IRQs handled
582  *	through here are not subjected to stats tracking, randomness, or
583  *	spurious interrupt detection.
584  *
585  *	Note: Like handle_simple_irq, the caller is expected to handle
586  *	the ack, clear, mask and unmask issues if necessary.
587  */
588 void handle_untracked_irq(struct irq_desc *desc)
589 {
590 	raw_spin_lock(&desc->lock);
591 
592 	if (!irq_may_run(desc))
593 		goto out_unlock;
594 
595 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
596 
597 	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
598 		desc->istate |= IRQS_PENDING;
599 		goto out_unlock;
600 	}
601 
602 	desc->istate &= ~IRQS_PENDING;
603 	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
604 	raw_spin_unlock(&desc->lock);
605 
606 	__handle_irq_event_percpu(desc);
607 
608 	raw_spin_lock(&desc->lock);
609 	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
610 
611 out_unlock:
612 	raw_spin_unlock(&desc->lock);
613 }
614 EXPORT_SYMBOL_GPL(handle_untracked_irq);
615 
616 /*
617  * Called unconditionally from handle_level_irq() and only for oneshot
618  * interrupts from handle_fasteoi_irq()
619  */
620 static void cond_unmask_irq(struct irq_desc *desc)
621 {
622 	/*
623 	 * We need to unmask in the following cases:
624 	 * - Standard level irq (IRQF_ONESHOT is not set)
625 	 * - Oneshot irq which did not wake the thread (caused by a
626 	 *   spurious interrupt or a primary handler handling it
627 	 *   completely).
628 	 */
629 	if (!irqd_irq_disabled(&desc->irq_data) &&
630 	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
631 		unmask_irq(desc);
632 }
633 
634 /**
635  *	handle_level_irq - Level type irq handler
636  *	@desc:	the interrupt description structure for this irq
637  *
638  *	Level type interrupts are active as long as the hardware line has
639  *	the active level. This may require masking the interrupt and unmasking
640  *	it after the associated handler has acknowledged the device, so that
641  *	the interrupt line goes back to inactive.
642  */
643 void handle_level_irq(struct irq_desc *desc)
644 {
645 	raw_spin_lock(&desc->lock);
646 	mask_ack_irq(desc);
647 
648 	if (!irq_may_run(desc))
649 		goto out_unlock;
650 
651 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
652 
653 	/*
654 	 * If it is disabled or no action is available,
655 	 * keep it masked and get out of here.
656 	 */
657 	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
658 		desc->istate |= IRQS_PENDING;
659 		goto out_unlock;
660 	}
661 
662 	kstat_incr_irqs_this_cpu(desc);
663 	handle_irq_event(desc);
664 
665 	cond_unmask_irq(desc);
666 
667 out_unlock:
668 	raw_spin_unlock(&desc->lock);
669 }
670 EXPORT_SYMBOL_GPL(handle_level_irq);
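
/*
 * Sketch of wiring a level triggered line to this flow handler, assuming a
 * hypothetical foo_chip which implements irq_mask/irq_unmask/irq_ack:
 *
 *	irq_set_chip_and_handler_name(virq, &foo_chip, handle_level_irq,
 *				      "level");
 */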
671 
672 static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
673 {
674 	if (!(desc->istate & IRQS_ONESHOT)) {
675 		chip->irq_eoi(&desc->irq_data);
676 		return;
677 	}
678 	/*
679 	 * We need to unmask in the following cases:
680 	 * - Oneshot irq which did not wake the thread (caused by a
681 	 *   spurious interrupt or a primary handler handling it
682 	 *   completely).
683 	 */
684 	if (!irqd_irq_disabled(&desc->irq_data) &&
685 	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
686 		chip->irq_eoi(&desc->irq_data);
687 		unmask_irq(desc);
688 	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
689 		chip->irq_eoi(&desc->irq_data);
690 	}
691 }
692 
693 /**
694  *	handle_fasteoi_irq - irq handler for transparent controllers
695  *	@desc:	the interrupt description structure for this irq
696  *
697  *	Only a single callback will be issued to the chip: an ->eoi()
698  *	call when the interrupt has been serviced. This enables support
699  *	for modern forms of interrupt handlers, which handle the flow
700  *	details in hardware, transparently.
701  */
702 void handle_fasteoi_irq(struct irq_desc *desc)
703 {
704 	struct irq_chip *chip = desc->irq_data.chip;
705 
706 	raw_spin_lock(&desc->lock);
707 
708 	/*
709 	 * When an affinity change races with IRQ handling, the next interrupt
710 	 * can arrive on the new CPU before the original CPU has completed
711 	 * handling the previous one - it may need to be resent.
712 	 */
713 	if (!irq_may_run(desc)) {
714 		if (irqd_needs_resend_when_in_progress(&desc->irq_data))
715 			desc->istate |= IRQS_PENDING;
716 		goto out;
717 	}
718 
719 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
720 
721 	/*
722 	 * If it is disabled or no action is available,
723 	 * then mask it and get out of here:
724 	 */
725 	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
726 		desc->istate |= IRQS_PENDING;
727 		mask_irq(desc);
728 		goto out;
729 	}
730 
731 	kstat_incr_irqs_this_cpu(desc);
732 	if (desc->istate & IRQS_ONESHOT)
733 		mask_irq(desc);
734 
735 	handle_irq_event(desc);
736 
737 	cond_unmask_eoi_irq(desc, chip);
738 
739 	/*
740 	 * When the race described above happens this will resend the interrupt.
741 	 */
742 	if (unlikely(desc->istate & IRQS_PENDING))
743 		check_irq_resend(desc, false);
744 
745 	raw_spin_unlock(&desc->lock);
746 	return;
747 out:
748 	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
749 		chip->irq_eoi(&desc->irq_data);
750 	raw_spin_unlock(&desc->lock);
751 }
752 EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
753 
754 /**
755  *	handle_fasteoi_nmi - irq handler for NMI interrupt lines
756  *	@desc:	the interrupt description structure for this irq
757  *
758  *	A simple NMI-safe handler, considering the restrictions
759  *	from request_nmi.
760  *
761  *	Only a single callback will be issued to the chip: an ->eoi()
762  *	call when the interrupt has been serviced. This enables support
763  *	for modern forms of interrupt handlers, which handle the flow
764  *	details in hardware, transparently.
765  */
766 void handle_fasteoi_nmi(struct irq_desc *desc)
767 {
768 	struct irq_chip *chip = irq_desc_get_chip(desc);
769 	struct irqaction *action = desc->action;
770 	unsigned int irq = irq_desc_get_irq(desc);
771 	irqreturn_t res;
772 
773 	__kstat_incr_irqs_this_cpu(desc);
774 
775 	trace_irq_handler_entry(irq, action);
776 	/*
777 	 * NMIs cannot be shared, there is only one action.
778 	 */
779 	res = action->handler(irq, action->dev_id);
780 	trace_irq_handler_exit(irq, action, res);
781 
782 	if (chip->irq_eoi)
783 		chip->irq_eoi(&desc->irq_data);
784 }
785 EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);
786 
787 /**
788  *	handle_edge_irq - edge type IRQ handler
789  *	@desc:	the interrupt description structure for this irq
790  *
791  *	Interrupt occurs on the falling and/or rising edge of a hardware
792  *	signal. The occurrence is latched into the irq controller hardware
793  *	and must be acked in order to be reenabled. After the ack another
794  *	interrupt can happen on the same source even before the first one
795  *	is handled by the associated event handler. If this happens it
796  *	might be necessary to disable (mask) the interrupt depending on the
797  *	controller hardware. This requires reenabling the interrupt inside
798  *	of the loop which handles the interrupts which have arrived while
799  *	the handler was running. If all pending interrupts are handled, the
800  *	loop is left.
801  */
802 void handle_edge_irq(struct irq_desc *desc)
803 {
804 	raw_spin_lock(&desc->lock);
805 
806 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
807 
808 	if (!irq_may_run(desc)) {
809 		desc->istate |= IRQS_PENDING;
810 		mask_ack_irq(desc);
811 		goto out_unlock;
812 	}
813 
814 	/*
815 	 * If it is disabled or no action is available then mask it and get
816 	 * out of here.
817 	 */
818 	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
819 		desc->istate |= IRQS_PENDING;
820 		mask_ack_irq(desc);
821 		goto out_unlock;
822 	}
823 
824 	kstat_incr_irqs_this_cpu(desc);
825 
826 	/* Start handling the irq */
827 	desc->irq_data.chip->irq_ack(&desc->irq_data);
828 
829 	do {
830 		if (unlikely(!desc->action)) {
831 			mask_irq(desc);
832 			goto out_unlock;
833 		}
834 
835 		/*
836 		 * When another irq arrived while we were handling
837 		 * one, we could have masked the irq.
838 		 * Reenable it, if it was not disabled in meantime.
839 		 */
840 		if (unlikely(desc->istate & IRQS_PENDING)) {
841 			if (!irqd_irq_disabled(&desc->irq_data) &&
842 			    irqd_irq_masked(&desc->irq_data))
843 				unmask_irq(desc);
844 		}
845 
846 		handle_irq_event(desc);
847 
848 	} while ((desc->istate & IRQS_PENDING) &&
849 		 !irqd_irq_disabled(&desc->irq_data));
850 
851 out_unlock:
852 	raw_spin_unlock(&desc->lock);
853 }
854 EXPORT_SYMBOL(handle_edge_irq);
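
/*
 * Sketch of wiring an edge triggered line, assuming the same hypothetical
 * foo_chip. The chip must provide irq_ack() because the flow handler above
 * acks the latched edge before running the action:
 *
 *	irq_set_chip_and_handler_name(virq, &foo_chip, handle_edge_irq,
 *				      "edge");
 */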
855 
856 #ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
857 /**
858  *	handle_edge_eoi_irq - edge eoi type IRQ handler
859  *	@desc:	the interrupt description structure for this irq
860  *
861  * Similar to handle_edge_irq above, but using eoi and without the
862  * mask/unmask logic.
863  */
864 void handle_edge_eoi_irq(struct irq_desc *desc)
865 {
866 	struct irq_chip *chip = irq_desc_get_chip(desc);
867 
868 	raw_spin_lock(&desc->lock);
869 
870 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
871 
872 	if (!irq_may_run(desc)) {
873 		desc->istate |= IRQS_PENDING;
874 		goto out_eoi;
875 	}
876 
877 	/*
878 	 * If it is disabled or no action is available then mask it and get
879 	 * out of here.
880 	 */
881 	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
882 		desc->istate |= IRQS_PENDING;
883 		goto out_eoi;
884 	}
885 
886 	kstat_incr_irqs_this_cpu(desc);
887 
888 	do {
889 		if (unlikely(!desc->action))
890 			goto out_eoi;
891 
892 		handle_irq_event(desc);
893 
894 	} while ((desc->istate & IRQS_PENDING) &&
895 		 !irqd_irq_disabled(&desc->irq_data));
896 
897 out_eoi:
898 	chip->irq_eoi(&desc->irq_data);
899 	raw_spin_unlock(&desc->lock);
900 }
901 #endif
902 
903 /**
904  *	handle_percpu_irq - Per CPU local irq handler
905  *	@desc:	the interrupt description structure for this irq
906  *
907  *	Per CPU interrupts on SMP machines without locking requirements
908  */
909 void handle_percpu_irq(struct irq_desc *desc)
910 {
911 	struct irq_chip *chip = irq_desc_get_chip(desc);
912 
913 	/*
914 	 * PER CPU interrupts are not serialized. Do not touch
915 	 * desc->tot_count.
916 	 */
917 	__kstat_incr_irqs_this_cpu(desc);
918 
919 	if (chip->irq_ack)
920 		chip->irq_ack(&desc->irq_data);
921 
922 	handle_irq_event_percpu(desc);
923 
924 	if (chip->irq_eoi)
925 		chip->irq_eoi(&desc->irq_data);
926 }
927 
928 /**
929  * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
930  * @desc:	the interrupt description structure for this irq
931  *
932  * Per CPU interrupts on SMP machines without locking requirements. Same as
933  * handle_percpu_irq() above but with the following extras:
934  *
935  * action->percpu_dev_id is a pointer to percpu variables which
936  * contain the real device id for the cpu on which this handler is
937  * called
938  */
939 void handle_percpu_devid_irq(struct irq_desc *desc)
940 {
941 	struct irq_chip *chip = irq_desc_get_chip(desc);
942 	struct irqaction *action = desc->action;
943 	unsigned int irq = irq_desc_get_irq(desc);
944 	irqreturn_t res;
945 
946 	/*
947 	 * PER CPU interrupts are not serialized. Do not touch
948 	 * desc->tot_count.
949 	 */
950 	__kstat_incr_irqs_this_cpu(desc);
951 
952 	if (chip->irq_ack)
953 		chip->irq_ack(&desc->irq_data);
954 
955 	if (likely(action)) {
956 		trace_irq_handler_entry(irq, action);
957 		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
958 		trace_irq_handler_exit(irq, action, res);
959 	} else {
960 		unsigned int cpu = smp_processor_id();
961 		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
962 
963 		if (enabled)
964 			irq_percpu_disable(desc, cpu);
965 
966 		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
967 			    enabled ? " and unmasked" : "", irq, cpu);
968 	}
969 
970 	if (chip->irq_eoi)
971 		chip->irq_eoi(&desc->irq_data);
972 }
973 
974 /**
975  * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
976  *				     dev ids
977  * @desc:	the interrupt description structure for this irq
978  *
979  * Similar to handle_fasteoi_nmi, but handling the dev_id cookie
980  * as a percpu pointer.
981  */
982 void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
983 {
984 	struct irq_chip *chip = irq_desc_get_chip(desc);
985 	struct irqaction *action = desc->action;
986 	unsigned int irq = irq_desc_get_irq(desc);
987 	irqreturn_t res;
988 
989 	__kstat_incr_irqs_this_cpu(desc);
990 
991 	trace_irq_handler_entry(irq, action);
992 	res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
993 	trace_irq_handler_exit(irq, action, res);
994 
995 	if (chip->irq_eoi)
996 		chip->irq_eoi(&desc->irq_data);
997 }
998 
999 static void
1000 __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
1001 		     int is_chained, const char *name)
1002 {
1003 	if (!handle) {
1004 		handle = handle_bad_irq;
1005 	} else {
1006 		struct irq_data *irq_data = &desc->irq_data;
1007 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1008 		/*
1009 		 * With hierarchical domains we might run into a
1010 		 * situation where the outermost chip is not yet set
1011 		 * up, but the inner chips are there.  Instead of
1012 		 * bailing we install the handler, but obviously we
1013 		 * cannot enable/startup the interrupt at this point.
1014 		 */
1015 		while (irq_data) {
1016 			if (irq_data->chip != &no_irq_chip)
1017 				break;
1018 			/*
1019 			 * Bail out if the outer chip is not set up
1020 			 * and the interrupt supposed to be started
1021 			 * right away.
1022 			 */
1023 			if (WARN_ON(is_chained))
1024 				return;
1025 			/* Try the parent */
1026 			irq_data = irq_data->parent_data;
1027 		}
1028 #endif
1029 		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
1030 			return;
1031 	}
1032 
1033 	/* Uninstall? */
1034 	if (handle == handle_bad_irq) {
1035 		if (desc->irq_data.chip != &no_irq_chip)
1036 			mask_ack_irq(desc);
1037 		irq_state_set_disabled(desc);
1038 		if (is_chained) {
1039 			desc->action = NULL;
1040 			WARN_ON(irq_chip_pm_put(irq_desc_get_irq_data(desc)));
1041 		}
1042 		desc->depth = 1;
1043 	}
1044 	desc->handle_irq = handle;
1045 	desc->name = name;
1046 
1047 	if (handle != handle_bad_irq && is_chained) {
1048 		unsigned int type = irqd_get_trigger_type(&desc->irq_data);
1049 
1050 		/*
1051 		 * We're about to start this interrupt immediately,
1052 		 * hence the need to set the trigger configuration.
1053 		 * But the .set_type callback may have overridden the
1054 		 * flow handler, ignoring that we're dealing with a
1055 		 * chained interrupt. Reset it immediately because we
1056 		 * do know better.
1057 		 */
1058 		if (type != IRQ_TYPE_NONE) {
1059 			__irq_set_trigger(desc, type);
1060 			desc->handle_irq = handle;
1061 		}
1062 
1063 		irq_settings_set_noprobe(desc);
1064 		irq_settings_set_norequest(desc);
1065 		irq_settings_set_nothread(desc);
1066 		desc->action = &chained_action;
1067 		WARN_ON(irq_chip_pm_get(irq_desc_get_irq_data(desc)));
1068 		irq_activate_and_startup(desc, IRQ_RESEND);
1069 	}
1070 }
1071 
1072 void
1073 __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
1074 		  const char *name)
1075 {
1076 	unsigned long flags;
1077 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
1078 
1079 	if (!desc)
1080 		return;
1081 
1082 	__irq_do_set_handler(desc, handle, is_chained, name);
1083 	irq_put_desc_busunlock(desc, flags);
1084 }
1085 EXPORT_SYMBOL_GPL(__irq_set_handler);
1086 
1087 void
1088 irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
1089 				 void *data)
1090 {
1091 	unsigned long flags;
1092 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
1093 
1094 	if (!desc)
1095 		return;
1096 
1097 	desc->irq_common_data.handler_data = data;
1098 	__irq_do_set_handler(desc, handle, 1, NULL);
1099 
1100 	irq_put_desc_busunlock(desc, flags);
1101 }
1102 EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
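
/*
 * Sketch of a chained setup, assuming a hypothetical GPIO controller whose
 * single parent interrupt fans out to per-pin child interrupts. The chained
 * flow handler runs in the parent's hard interrupt context and the parent
 * line must not also be requested via request_irq():
 *
 *	static void foo_gpio_demux(struct irq_desc *desc)
 *	{
 *		struct foo_gpio *gpio = irq_desc_get_handler_data(desc);
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		unsigned long pending;
 *		int bit;
 *
 *		chained_irq_enter(chip, desc);
 *		pending = foo_gpio_pending(gpio);
 *		for_each_set_bit(bit, &pending, gpio->ngpio)
 *			generic_handle_domain_irq(gpio->domain, bit);
 *		chained_irq_exit(chip, desc);
 *	}
 *
 *	irq_set_chained_handler_and_data(parent_irq, foo_gpio_demux, gpio);
 */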
1103 
1104 void
1105 irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip,
1106 			      irq_flow_handler_t handle, const char *name)
1107 {
1108 	irq_set_chip(irq, chip);
1109 	__irq_set_handler(irq, handle, 0, name);
1110 }
1111 EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
1112 
1113 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
1114 {
1115 	unsigned long flags, trigger, tmp;
1116 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
1117 
1118 	if (!desc)
1119 		return;
1120 
1121 	/*
1122 	 * Warn when a driver sets the no autoenable flag on an already
1123 	 * active interrupt.
1124 	 */
1125 	WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));
1126 
1127 	irq_settings_clr_and_set(desc, clr, set);
1128 
1129 	trigger = irqd_get_trigger_type(&desc->irq_data);
1130 
1131 	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
1132 		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
1133 	if (irq_settings_has_no_balance_set(desc))
1134 		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1135 	if (irq_settings_is_per_cpu(desc))
1136 		irqd_set(&desc->irq_data, IRQD_PER_CPU);
1137 	if (irq_settings_can_move_pcntxt(desc))
1138 		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
1139 	if (irq_settings_is_level(desc))
1140 		irqd_set(&desc->irq_data, IRQD_LEVEL);
1141 
1142 	tmp = irq_settings_get_trigger_mask(desc);
1143 	if (tmp != IRQ_TYPE_NONE)
1144 		trigger = tmp;
1145 
1146 	irqd_set(&desc->irq_data, trigger);
1147 
1148 	irq_put_desc_unlock(desc, flags);
1149 }
1150 EXPORT_SYMBOL_GPL(irq_modify_status);
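
/*
 * Sketch of a typical use, assuming a line that should not be auto-enabled
 * when requested and should be skipped by interrupt auto-probing:
 *
 *	irq_modify_status(virq, 0, IRQ_NOAUTOEN | IRQ_NOPROBE);
 *
 * The second argument names the settings to clear and the third the
 * settings to set; both are applied under the descriptor lock.
 */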
1151 
1152 #ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
1153 /**
1154  *	irq_cpu_online - Invoke all irq_cpu_online functions.
1155  *
1156  *	Iterate through all irqs and invoke the chip.irq_cpu_online()
1157  *	for each.
1158  */
1159 void irq_cpu_online(void)
1160 {
1161 	struct irq_desc *desc;
1162 	struct irq_chip *chip;
1163 	unsigned long flags;
1164 	unsigned int irq;
1165 
1166 	for_each_active_irq(irq) {
1167 		desc = irq_to_desc(irq);
1168 		if (!desc)
1169 			continue;
1170 
1171 		raw_spin_lock_irqsave(&desc->lock, flags);
1172 
1173 		chip = irq_data_get_irq_chip(&desc->irq_data);
1174 		if (chip && chip->irq_cpu_online &&
1175 		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
1176 		     !irqd_irq_disabled(&desc->irq_data)))
1177 			chip->irq_cpu_online(&desc->irq_data);
1178 
1179 		raw_spin_unlock_irqrestore(&desc->lock, flags);
1180 	}
1181 }
1182 
1183 /**
1184  *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
1185  *
1186  *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
1187  *	for each.
1188  */
1189 void irq_cpu_offline(void)
1190 {
1191 	struct irq_desc *desc;
1192 	struct irq_chip *chip;
1193 	unsigned long flags;
1194 	unsigned int irq;
1195 
1196 	for_each_active_irq(irq) {
1197 		desc = irq_to_desc(irq);
1198 		if (!desc)
1199 			continue;
1200 
1201 		raw_spin_lock_irqsave(&desc->lock, flags);
1202 
1203 		chip = irq_data_get_irq_chip(&desc->irq_data);
1204 		if (chip && chip->irq_cpu_offline &&
1205 		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
1206 		     !irqd_irq_disabled(&desc->irq_data)))
1207 			chip->irq_cpu_offline(&desc->irq_data);
1208 
1209 		raw_spin_unlock_irqrestore(&desc->lock, flags);
1210 	}
1211 }
1212 #endif
1213 
1214 #ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
1215 
1216 #ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
1217 /**
1218  *	handle_fasteoi_ack_irq - irq handler for edge hierarchy
1219  *	stacked on transparent controllers
1220  *
1221  *	@desc:	the interrupt description structure for this irq
1222  *
1223  *	Like handle_fasteoi_irq(), but for use with hierarchy where
1224  *	the irq_chip also needs to have its ->irq_ack() function
1225  *	called.
1226  */
1227 void handle_fasteoi_ack_irq(struct irq_desc *desc)
1228 {
1229 	struct irq_chip *chip = desc->irq_data.chip;
1230 
1231 	raw_spin_lock(&desc->lock);
1232 
1233 	if (!irq_may_run(desc))
1234 		goto out;
1235 
1236 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
1237 
1238 	/*
1239 	 * If it is disabled or no action is available,
1240 	 * then mask it and get out of here:
1241 	 */
1242 	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
1243 		desc->istate |= IRQS_PENDING;
1244 		mask_irq(desc);
1245 		goto out;
1246 	}
1247 
1248 	kstat_incr_irqs_this_cpu(desc);
1249 	if (desc->istate & IRQS_ONESHOT)
1250 		mask_irq(desc);
1251 
1252 	/* Start handling the irq */
1253 	desc->irq_data.chip->irq_ack(&desc->irq_data);
1254 
1255 	handle_irq_event(desc);
1256 
1257 	cond_unmask_eoi_irq(desc, chip);
1258 
1259 	raw_spin_unlock(&desc->lock);
1260 	return;
1261 out:
1262 	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
1263 		chip->irq_eoi(&desc->irq_data);
1264 	raw_spin_unlock(&desc->lock);
1265 }
1266 EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);
1267 
1268 /**
1269  *	handle_fasteoi_mask_irq - irq handler for level hierarchy
1270  *	stacked on transparent controllers
1271  *
1272  *	@desc:	the interrupt description structure for this irq
1273  *
1274  *	Like handle_fasteoi_irq(), but for use with hierarchy where
1275  *	the irq_chip also needs to have its ->irq_mask_ack() function
1276  *	called.
1277  */
1278 void handle_fasteoi_mask_irq(struct irq_desc *desc)
1279 {
1280 	struct irq_chip *chip = desc->irq_data.chip;
1281 
1282 	raw_spin_lock(&desc->lock);
1283 	mask_ack_irq(desc);
1284 
1285 	if (!irq_may_run(desc))
1286 		goto out;
1287 
1288 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
1289 
1290 	/*
1291 	 * If it is disabled or no action is available,
1292 	 * then mask it and get out of here:
1293 	 */
1294 	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
1295 		desc->istate |= IRQS_PENDING;
1296 		mask_irq(desc);
1297 		goto out;
1298 	}
1299 
1300 	kstat_incr_irqs_this_cpu(desc);
1301 	if (desc->istate & IRQS_ONESHOT)
1302 		mask_irq(desc);
1303 
1304 	handle_irq_event(desc);
1305 
1306 	cond_unmask_eoi_irq(desc, chip);
1307 
1308 	raw_spin_unlock(&desc->lock);
1309 	return;
1310 out:
1311 	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
1312 		chip->irq_eoi(&desc->irq_data);
1313 	raw_spin_unlock(&desc->lock);
1314 }
1315 EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);
1316 
1317 #endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */
1318 
1319 /**
1320  * irq_chip_set_parent_state - set the state of a parent interrupt.
1321  *
1322  * @data: Pointer to interrupt specific data
1323  * @which: State to be restored (one of IRQCHIP_STATE_*)
1324  * @val: Value corresponding to @which
1325  *
1326  * Conditional success: returns 0 if the underlying irqchip does not implement it.
1327  */
1328 int irq_chip_set_parent_state(struct irq_data *data,
1329 			      enum irqchip_irq_state which,
1330 			      bool val)
1331 {
1332 	data = data->parent_data;
1333 
1334 	if (!data || !data->chip->irq_set_irqchip_state)
1335 		return 0;
1336 
1337 	return data->chip->irq_set_irqchip_state(data, which, val);
1338 }
1339 EXPORT_SYMBOL_GPL(irq_chip_set_parent_state);
1340 
1341 /**
1342  * irq_chip_get_parent_state - get the state of a parent interrupt.
1343  *
1344  * @data: Pointer to interrupt specific data
1345  * @which: one of IRQCHIP_STATE_* the caller wants to know
1346  * @state: a pointer to a boolean where the state is to be stored
1347  *
1348  * Conditional success: returns 0 if the underlying irqchip does not implement it.
1349  */
1350 int irq_chip_get_parent_state(struct irq_data *data,
1351 			      enum irqchip_irq_state which,
1352 			      bool *state)
1353 {
1354 	data = data->parent_data;
1355 
1356 	if (!data || !data->chip->irq_get_irqchip_state)
1357 		return 0;
1358 
1359 	return data->chip->irq_get_irqchip_state(data, which, state);
1360 }
1361 EXPORT_SYMBOL_GPL(irq_chip_get_parent_state);
1362 
1363 /**
1364  * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
1365  * NULL)
1366  * @data:	Pointer to interrupt specific data
1367  */
1368 void irq_chip_enable_parent(struct irq_data *data)
1369 {
1370 	data = data->parent_data;
1371 	if (data->chip->irq_enable)
1372 		data->chip->irq_enable(data);
1373 	else
1374 		data->chip->irq_unmask(data);
1375 }
1376 EXPORT_SYMBOL_GPL(irq_chip_enable_parent);
1377 
1378 /**
1379  * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
1380  * NULL)
1381  * @data:	Pointer to interrupt specific data
1382  */
1383 void irq_chip_disable_parent(struct irq_data *data)
1384 {
1385 	data = data->parent_data;
1386 	if (data->chip->irq_disable)
1387 		data->chip->irq_disable(data);
1388 	else
1389 		data->chip->irq_mask(data);
1390 }
1391 EXPORT_SYMBOL_GPL(irq_chip_disable_parent);
1392 
1393 /**
1394  * irq_chip_ack_parent - Acknowledge the parent interrupt
1395  * @data:	Pointer to interrupt specific data
1396  */
1397 void irq_chip_ack_parent(struct irq_data *data)
1398 {
1399 	data = data->parent_data;
1400 	data->chip->irq_ack(data);
1401 }
1402 EXPORT_SYMBOL_GPL(irq_chip_ack_parent);
1403 
1404 /**
1405  * irq_chip_mask_parent - Mask the parent interrupt
1406  * @data:	Pointer to interrupt specific data
1407  */
1408 void irq_chip_mask_parent(struct irq_data *data)
1409 {
1410 	data = data->parent_data;
1411 	data->chip->irq_mask(data);
1412 }
1413 EXPORT_SYMBOL_GPL(irq_chip_mask_parent);
1414 
1415 /**
1416  * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt
1417  * @data:	Pointer to interrupt specific data
1418  */
1419 void irq_chip_mask_ack_parent(struct irq_data *data)
1420 {
1421 	data = data->parent_data;
1422 	data->chip->irq_mask_ack(data);
1423 }
1424 EXPORT_SYMBOL_GPL(irq_chip_mask_ack_parent);
1425 
1426 /**
1427  * irq_chip_unmask_parent - Unmask the parent interrupt
1428  * @data:	Pointer to interrupt specific data
1429  */
1430 void irq_chip_unmask_parent(struct irq_data *data)
1431 {
1432 	data = data->parent_data;
1433 	data->chip->irq_unmask(data);
1434 }
1435 EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);
1436 
1437 /**
1438  * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
1439  * @data:	Pointer to interrupt specific data
1440  */
1441 void irq_chip_eoi_parent(struct irq_data *data)
1442 {
1443 	data = data->parent_data;
1444 	data->chip->irq_eoi(data);
1445 }
1446 EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);
1447 
1448 /**
1449  * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
1450  * @data:	Pointer to interrupt specific data
1451  * @dest:	The affinity mask to set
1452  * @force:	Flag to enforce setting (disable online checks)
1453  *
1454  * Conditional, as the underlying parent chip might not implement it.
1455  */
1456 int irq_chip_set_affinity_parent(struct irq_data *data,
1457 				 const struct cpumask *dest, bool force)
1458 {
1459 	data = data->parent_data;
1460 	if (data->chip->irq_set_affinity)
1461 		return data->chip->irq_set_affinity(data, dest, force);
1462 
1463 	return -ENOSYS;
1464 }
1465 EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);
1466 
1467 /**
1468  * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
1469  * @data:	Pointer to interrupt specific data
1470  * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
1471  *
1472  * Conditional, as the underlying parent chip might not implement it.
1473  */
1474 int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
1475 {
1476 	data = data->parent_data;
1477 
1478 	if (data->chip->irq_set_type)
1479 		return data->chip->irq_set_type(data, type);
1480 
1481 	return -ENOSYS;
1482 }
1483 EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);
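
/*
 * The *_parent helpers above let the chip of a child domain in a hierarchy
 * delegate operations to its parent. A minimal sketch, assuming a
 * hypothetical wrapper domain stacked on a GIC-like parent:
 *
 *	static struct irq_chip foo_child_chip = {
 *		.name			= "foo",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_eoi		= irq_chip_eoi_parent,
 *		.irq_set_type		= irq_chip_set_type_parent,
 *		.irq_set_affinity	= irq_chip_set_affinity_parent,
 *	};
 */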
1484 
1485 /**
1486  * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
1487  * @data:	Pointer to interrupt specific data
1488  *
1489  * Iterate through the domain hierarchy of the interrupt and check
1490  * whether a hw retrigger function exists. If yes, invoke it.
1491  */
1492 int irq_chip_retrigger_hierarchy(struct irq_data *data)
1493 {
1494 	for (data = data->parent_data; data; data = data->parent_data)
1495 		if (data->chip && data->chip->irq_retrigger)
1496 			return data->chip->irq_retrigger(data);
1497 
1498 	return 0;
1499 }
1500 EXPORT_SYMBOL_GPL(irq_chip_retrigger_hierarchy);
1501 
1502 /**
1503  * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
1504  * @data:	Pointer to interrupt specific data
1505  * @vcpu_info:	The vcpu affinity information
1506  */
1507 int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
1508 {
1509 	data = data->parent_data;
1510 	if (data->chip->irq_set_vcpu_affinity)
1511 		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);
1512 
1513 	return -ENOSYS;
1514 }
1515 EXPORT_SYMBOL_GPL(irq_chip_set_vcpu_affinity_parent);
1516 /**
1517  * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
1518  * @data:	Pointer to interrupt specific data
1519  * @on:		Whether to set or reset the wake-up capability of this irq
1520  *
1521  * Conditional, as the underlying parent chip might not implement it.
1522  */
1523 int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
1524 {
1525 	data = data->parent_data;
1526 
1527 	if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
1528 		return 0;
1529 
1530 	if (data->chip->irq_set_wake)
1531 		return data->chip->irq_set_wake(data, on);
1532 
1533 	return -ENOSYS;
1534 }
1535 EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent);
1536 
1537 /**
1538  * irq_chip_request_resources_parent - Request resources on the parent interrupt
1539  * @data:	Pointer to interrupt specific data
1540  */
1541 int irq_chip_request_resources_parent(struct irq_data *data)
1542 {
1543 	data = data->parent_data;
1544 
1545 	if (data->chip->irq_request_resources)
1546 		return data->chip->irq_request_resources(data);
1547 
1548 	/* no error on missing optional irq_chip::irq_request_resources */
1549 	return 0;
1550 }
1551 EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent);
1552 
1553 /**
1554  * irq_chip_release_resources_parent - Release resources on the parent interrupt
1555  * @data:	Pointer to interrupt specific data
1556  */
1557 void irq_chip_release_resources_parent(struct irq_data *data)
1558 {
1559 	data = data->parent_data;
1560 	if (data->chip->irq_release_resources)
1561 		data->chip->irq_release_resources(data);
1562 }
1563 EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent);
1564 #endif
1565 
1566 /**
1567  * irq_chip_compose_msi_msg - Compose msi message for an irq chip
1568  * @data:	Pointer to interrupt specific data
1569  * @msg:	Pointer to the MSI message
1570  *
1571  * For hierarchical domains we find the first chip in the hierarchy
1572  * which implements the irq_compose_msi_msg callback. For non
1573  * which implements the irq_compose_msi_msg callback. For non-hierarchical
1574  * domains we use the top level chip.
1575 int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1576 {
1577 	struct irq_data *pos;
1578 
1579 	for (pos = NULL; !pos && data; data = irqd_get_parent_data(data)) {
1580 		if (data->chip && data->chip->irq_compose_msi_msg)
1581 			pos = data;
1582 	}
1583 
1584 	if (!pos)
1585 		return -ENOSYS;
1586 
1587 	pos->chip->irq_compose_msi_msg(pos, msg);
1588 	return 0;
1589 }
1590 
1591 static struct device *irq_get_pm_device(struct irq_data *data)
1592 {
1593 	if (data->domain)
1594 		return data->domain->pm_dev;
1595 
1596 	return NULL;
1597 }
1598 
1599 /**
1600  * irq_chip_pm_get - Enable power for an IRQ chip
1601  * @data:	Pointer to interrupt specific data
1602  *
1603  * Enable the power to the IRQ chip referenced by the interrupt data
1604  * structure.
1605  */
1606 int irq_chip_pm_get(struct irq_data *data)
1607 {
1608 	struct device *dev = irq_get_pm_device(data);
1609 	int retval = 0;
1610 
1611 	if (IS_ENABLED(CONFIG_PM) && dev)
1612 		retval = pm_runtime_resume_and_get(dev);
1613 
1614 	return retval;
1615 }
1616 
1617 /**
1618  * irq_chip_pm_put - Disable power for an IRQ chip
1619  * @data:	Pointer to interrupt specific data
1620  *
1621  * Disable the power to the IRQ chip referenced by the interrupt data
1622  * structure. Note that power will only be disabled once this
1623  * function has been called for all IRQs that have called irq_chip_pm_get().
1624  */
1625 int irq_chip_pm_put(struct irq_data *data)
1626 {
1627 	struct device *dev = irq_get_pm_device(data);
1628 	int retval = 0;
1629 
1630 	if (IS_ENABLED(CONFIG_PM) && dev)
1631 		retval = pm_runtime_put(dev);
1632 
1633 	return (retval < 0) ? retval : 0;
1634 }
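
/*
 * irq_chip_pm_get()/irq_chip_pm_put() are paired by the core around request
 * and free of an interrupt, and around chained handler installation as seen
 * in __irq_do_set_handler() above. A domain only has to publish its power
 * management device for them to take effect; a sketch, assuming a
 * hypothetical driver with a runtime-PM enabled struct device *dev:
 *
 *	domain->pm_dev = dev;
 *
 * After that, irq_chip_pm_get() resumes @dev before the chip is touched and
 * irq_chip_pm_put() drops the reference again.
 */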
1635