1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
4  * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
5  *
6  * This file contains the core interrupt handling code, for irq-chip based
7  * architectures. Detailed information is available in
8  * Documentation/core-api/genericirq.rst
9  */
10 
11 #include <linux/irq.h>
12 #include <linux/msi.h>
13 #include <linux/module.h>
14 #include <linux/interrupt.h>
15 #include <linux/kernel_stat.h>
16 #include <linux/irqdomain.h>
17 #include <linux/wakeup_reason.h>
18 
19 #include <trace/events/irq.h>
20 
21 #include "internals.h"
22 
23 static irqreturn_t bad_chained_irq(int irq, void *dev_id)
24 {
25 	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
26 	return IRQ_NONE;
27 }
28 
29 /*
30  * Chained handlers should never call an action on their IRQ. This default
31  * action will emit a warning if that happens.
32  */
33 struct irqaction chained_action = {
34 	.handler = bad_chained_irq,
35 };
36 
37 /**
38  *	irq_set_chip - set the irq chip for an irq
39  *	@irq:	irq number
40  *	@chip:	pointer to irq chip description structure
41  */
42 int irq_set_chip(unsigned int irq, const struct irq_chip *chip)
43 {
44 	unsigned long flags;
45 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
46 
47 	if (!desc)
48 		return -EINVAL;
49 
50 	desc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip);
51 	irq_put_desc_unlock(desc, flags);
52 	/*
53 	 * For !CONFIG_SPARSE_IRQ make the irq show up in
54 	 * allocated_irqs.
55 	 */
56 	irq_mark_irq(irq);
57 	return 0;
58 }
59 EXPORT_SYMBOL(irq_set_chip);
60 
61 /**
62  *	irq_set_irq_type - set the irq trigger type for an irq
63  *	@irq:	irq number
64  *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
65  */
66 int irq_set_irq_type(unsigned int irq, unsigned int type)
67 {
68 	unsigned long flags;
69 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
70 	int ret = 0;
71 
72 	if (!desc)
73 		return -EINVAL;
74 
75 	ret = __irq_set_trigger(desc, type);
76 	irq_put_desc_busunlock(desc, flags);
77 	return ret;
78 }
79 EXPORT_SYMBOL(irq_set_irq_type);
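
/*
 * Editor's example (not part of the original file): a minimal sketch of a
 * driver configuring the trigger type before requesting the line. The names
 * board_irq, my_handler and my_dev are hypothetical; in practice the trigger
 * is often passed via IRQF_TRIGGER_* flags to request_irq() instead.
 */
#if 0
	ret = irq_set_irq_type(board_irq, IRQ_TYPE_EDGE_FALLING);
	if (ret)
		return ret;
	ret = request_irq(board_irq, my_handler, 0, "my-device", my_dev);
#endif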
80 
81 /**
82  *	irq_set_handler_data - set irq handler data for an irq
83  *	@irq:	Interrupt number
84  *	@data:	Pointer to interrupt specific data
85  *
86  *	Set the hardware irq controller data for an irq
87  */
88 int irq_set_handler_data(unsigned int irq, void *data)
89 {
90 	unsigned long flags;
91 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
92 
93 	if (!desc)
94 		return -EINVAL;
95 	desc->irq_common_data.handler_data = data;
96 	irq_put_desc_unlock(desc, flags);
97 	return 0;
98 }
99 EXPORT_SYMBOL(irq_set_handler_data);
100 
101 /**
102  *	irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
103  *	@irq_base:	Interrupt number base
104  *	@irq_offset:	Interrupt number offset
105  *	@entry:		Pointer to MSI descriptor data
106  *
107  *	Set the MSI descriptor entry for an irq at offset
108  */
109 int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
110 			 struct msi_desc *entry)
111 {
112 	unsigned long flags;
113 	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
114 
115 	if (!desc)
116 		return -EINVAL;
117 	desc->irq_common_data.msi_desc = entry;
118 	if (entry && !irq_offset)
119 		entry->irq = irq_base;
120 	irq_put_desc_unlock(desc, flags);
121 	return 0;
122 }
123 
124 /**
125  *	irq_set_msi_desc - set MSI descriptor data for an irq
126  *	@irq:	Interrupt number
127  *	@entry:	Pointer to MSI descriptor data
128  *
129  *	Set the MSI descriptor entry for an irq
130  */
131 int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
132 {
133 	return irq_set_msi_desc_off(irq, 0, entry);
134 }
135 
136 /**
137  *	irq_set_chip_data - set irq chip data for an irq
138  *	@irq:	Interrupt number
139  *	@data:	Pointer to chip specific data
140  *
141  *	Set the hardware irq chip data for an irq
142  */
143 int irq_set_chip_data(unsigned int irq, void *data)
144 {
145 	unsigned long flags;
146 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
147 
148 	if (!desc)
149 		return -EINVAL;
150 	desc->irq_data.chip_data = data;
151 	irq_put_desc_unlock(desc, flags);
152 	return 0;
153 }
154 EXPORT_SYMBOL(irq_set_chip_data);
155 
156 struct irq_data *irq_get_irq_data(unsigned int irq)
157 {
158 	struct irq_desc *desc = irq_to_desc(irq);
159 
160 	return desc ? &desc->irq_data : NULL;
161 }
162 EXPORT_SYMBOL_GPL(irq_get_irq_data);
163 
164 static void irq_state_clr_disabled(struct irq_desc *desc)
165 {
166 	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
167 }
168 
169 static void irq_state_clr_masked(struct irq_desc *desc)
170 {
171 	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
172 }
173 
174 static void irq_state_clr_started(struct irq_desc *desc)
175 {
176 	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
177 }
178 
179 static void irq_state_set_started(struct irq_desc *desc)
180 {
181 	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
182 }
183 
184 enum {
185 	IRQ_STARTUP_NORMAL,
186 	IRQ_STARTUP_MANAGED,
187 	IRQ_STARTUP_ABORT,
188 };
189 
190 #ifdef CONFIG_SMP
191 static int
192 __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
193 		      bool force)
194 {
195 	struct irq_data *d = irq_desc_get_irq_data(desc);
196 
197 	if (!irqd_affinity_is_managed(d))
198 		return IRQ_STARTUP_NORMAL;
199 
200 	irqd_clr_managed_shutdown(d);
201 
202 	if (!cpumask_intersects(aff, cpu_online_mask)) {
203 		/*
204 		 * Catch code which fiddles with enable_irq() on a managed
205 		 * and potentially shutdown IRQ. Chained interrupt
206 		 * installment or irq auto probing should not happen on
207 		 * managed irqs either.
208 		 */
209 		if (WARN_ON_ONCE(force))
210 			return IRQ_STARTUP_ABORT;
211 		/*
212 		 * The interrupt was requested, but there is no online CPU
213 		 * in its affinity mask. Put it into managed shutdown
214 		 * state and let the cpu hotplug mechanism start it up once
215 		 * a CPU in the mask becomes available.
216 		 */
217 		return IRQ_STARTUP_ABORT;
218 	}
219 	/*
220 	 * Managed interrupts have reserved resources, so this should not
221 	 * happen.
222 	 */
223 	if (WARN_ON(irq_domain_activate_irq(d, false)))
224 		return IRQ_STARTUP_ABORT;
225 	return IRQ_STARTUP_MANAGED;
226 }
227 
228 void irq_startup_managed(struct irq_desc *desc)
229 {
230 	struct irq_data *d = irq_desc_get_irq_data(desc);
231 
232 	/*
233 	 * Clear managed-shutdown flag, so we don't repeat managed-startup for
234 	 * multiple hotplugs, and cause imbalanced disable depth.
235 	 */
236 	irqd_clr_managed_shutdown(d);
237 
238 	/*
239 	 * Only start it up when the disable depth is 1, so that a disable,
240 	 * hotunplug, hotplug sequence does not end up enabling it during
241 	 * hotplug unconditionally.
242 	 */
243 	desc->depth--;
244 	if (!desc->depth)
245 		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
246 }
247 
248 #else
249 static __always_inline int
250 __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
251 		      bool force)
252 {
253 	return IRQ_STARTUP_NORMAL;
254 }
255 #endif
256 
257 static int __irq_startup(struct irq_desc *desc)
258 {
259 	struct irq_data *d = irq_desc_get_irq_data(desc);
260 	int ret = 0;
261 
262 	/* Warn if this interrupt is not activated but try nevertheless */
263 	WARN_ON_ONCE(!irqd_is_activated(d));
264 
265 	if (d->chip->irq_startup) {
266 		ret = d->chip->irq_startup(d);
267 		irq_state_clr_disabled(desc);
268 		irq_state_clr_masked(desc);
269 	} else {
270 		irq_enable(desc);
271 	}
272 	irq_state_set_started(desc);
273 	return ret;
274 }
275 
276 int irq_startup(struct irq_desc *desc, bool resend, bool force)
277 {
278 	struct irq_data *d = irq_desc_get_irq_data(desc);
279 	const struct cpumask *aff = irq_data_get_affinity_mask(d);
280 	int ret = 0;
281 
282 	desc->depth = 0;
283 
284 	if (irqd_is_started(d)) {
285 		irq_enable(desc);
286 	} else {
287 		switch (__irq_startup_managed(desc, aff, force)) {
288 		case IRQ_STARTUP_NORMAL:
289 			if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)
290 				irq_setup_affinity(desc);
291 			ret = __irq_startup(desc);
292 			if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP))
293 				irq_setup_affinity(desc);
294 			break;
295 		case IRQ_STARTUP_MANAGED:
296 			irq_do_set_affinity(d, aff, false);
297 			ret = __irq_startup(desc);
298 			break;
299 		case IRQ_STARTUP_ABORT:
300 			desc->depth = 1;
301 			irqd_set_managed_shutdown(d);
302 			return 0;
303 		}
304 	}
305 	if (resend)
306 		check_irq_resend(desc, false);
307 
308 	return ret;
309 }
310 
311 int irq_activate(struct irq_desc *desc)
312 {
313 	struct irq_data *d = irq_desc_get_irq_data(desc);
314 
315 	if (!irqd_affinity_is_managed(d))
316 		return irq_domain_activate_irq(d, false);
317 	return 0;
318 }
319 
320 int irq_activate_and_startup(struct irq_desc *desc, bool resend)
321 {
322 	if (WARN_ON(irq_activate(desc)))
323 		return 0;
324 	return irq_startup(desc, resend, IRQ_START_FORCE);
325 }
326 
327 static void __irq_disable(struct irq_desc *desc, bool mask);
328 
329 void irq_shutdown(struct irq_desc *desc)
330 {
331 	if (irqd_is_started(&desc->irq_data)) {
332 		clear_irq_resend(desc);
333 		/*
334 		 * Increment disable depth, so that a managed shutdown on
335 		 * CPU hotunplug preserves the actual disabled state when the
336 		 * CPU comes back online. See irq_startup_managed().
337 		 */
338 		desc->depth++;
339 
340 		if (desc->irq_data.chip->irq_shutdown) {
341 			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
342 			irq_state_set_disabled(desc);
343 			irq_state_set_masked(desc);
344 		} else {
345 			__irq_disable(desc, true);
346 		}
347 		irq_state_clr_started(desc);
348 	}
349 }
350 
351 
352 void irq_shutdown_and_deactivate(struct irq_desc *desc)
353 {
354 	irq_shutdown(desc);
355 	/*
356 	 * This must be called even if the interrupt was never started up,
357 	 * because the activation can happen before the interrupt is
358 	 * available for request/startup. It has its own state tracking so
359 	 * it's safe to call it unconditionally.
360 	 */
361 	irq_domain_deactivate_irq(&desc->irq_data);
362 }
363 
364 void irq_enable(struct irq_desc *desc)
365 {
366 	if (!irqd_irq_disabled(&desc->irq_data)) {
367 		unmask_irq(desc);
368 	} else {
369 		irq_state_clr_disabled(desc);
370 		if (desc->irq_data.chip->irq_enable) {
371 			desc->irq_data.chip->irq_enable(&desc->irq_data);
372 			irq_state_clr_masked(desc);
373 		} else {
374 			unmask_irq(desc);
375 		}
376 	}
377 }
378 
379 static void __irq_disable(struct irq_desc *desc, bool mask)
380 {
381 	if (irqd_irq_disabled(&desc->irq_data)) {
382 		if (mask)
383 			mask_irq(desc);
384 	} else {
385 		irq_state_set_disabled(desc);
386 		if (desc->irq_data.chip->irq_disable) {
387 			desc->irq_data.chip->irq_disable(&desc->irq_data);
388 			irq_state_set_masked(desc);
389 		} else if (mask) {
390 			mask_irq(desc);
391 		}
392 	}
393 }
394 
395 /**
396  * irq_disable - Mark interrupt disabled
397  * @desc:	irq descriptor which should be disabled
398  *
399  * If the chip does not implement the irq_disable callback, we
400  * use a lazy disable approach. That means we mark the interrupt
401  * disabled, but leave the hardware unmasked. That's an
402  * optimization because we avoid the hardware access for the
403  * common case where no interrupt happens after we marked it
404  * disabled. If an interrupt happens, then the interrupt flow
405  * handler masks the line at the hardware level and marks it
406  * pending.
407  *
408  * If the interrupt chip does not implement the irq_disable callback,
409  * a driver can disable the lazy approach for a particular irq line by
410  * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
411  * be used for devices which cannot disable the interrupt at the
412  * device level under certain circumstances and have to use
413  * disable_irq[_nosync] instead.
414  */
415 void irq_disable(struct irq_desc *desc)
416 {
417 	__irq_disable(desc, irq_settings_disable_unlazy(desc));
418 }
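
/*
 * Editor's example (not part of the original file): per the comment above, a
 * driver that cannot tolerate the lazy disable optimization can opt out for
 * a particular line. "my_irq" is a hypothetical, already mapped Linux irq
 * number.
 */
#if 0
	irq_set_status_flags(my_irq, IRQ_DISABLE_UNLAZY);
	disable_irq(my_irq);	/* now masks the line at the hardware level */
#endif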
419 
420 void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
421 {
422 	if (desc->irq_data.chip->irq_enable)
423 		desc->irq_data.chip->irq_enable(&desc->irq_data);
424 	else
425 		desc->irq_data.chip->irq_unmask(&desc->irq_data);
426 	cpumask_set_cpu(cpu, desc->percpu_enabled);
427 }
428 
429 void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
430 {
431 	if (desc->irq_data.chip->irq_disable)
432 		desc->irq_data.chip->irq_disable(&desc->irq_data);
433 	else
434 		desc->irq_data.chip->irq_mask(&desc->irq_data);
435 	cpumask_clear_cpu(cpu, desc->percpu_enabled);
436 }
437 
438 static inline void mask_ack_irq(struct irq_desc *desc)
439 {
440 	if (desc->irq_data.chip->irq_mask_ack) {
441 		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
442 		irq_state_set_masked(desc);
443 	} else {
444 		mask_irq(desc);
445 		if (desc->irq_data.chip->irq_ack)
446 			desc->irq_data.chip->irq_ack(&desc->irq_data);
447 	}
448 }
449 
450 void mask_irq(struct irq_desc *desc)
451 {
452 	if (irqd_irq_masked(&desc->irq_data))
453 		return;
454 
455 	if (desc->irq_data.chip->irq_mask) {
456 		desc->irq_data.chip->irq_mask(&desc->irq_data);
457 		irq_state_set_masked(desc);
458 	}
459 }
460 
461 void unmask_irq(struct irq_desc *desc)
462 {
463 	if (!irqd_irq_masked(&desc->irq_data))
464 		return;
465 
466 	if (desc->irq_data.chip->irq_unmask) {
467 		desc->irq_data.chip->irq_unmask(&desc->irq_data);
468 		irq_state_clr_masked(desc);
469 	}
470 }
471 
472 void unmask_threaded_irq(struct irq_desc *desc)
473 {
474 	struct irq_chip *chip = desc->irq_data.chip;
475 
476 	if (chip->flags & IRQCHIP_EOI_THREADED)
477 		chip->irq_eoi(&desc->irq_data);
478 
479 	unmask_irq(desc);
480 }
481 
482 /*
483  *	handle_nested_irq - Handle a nested irq from an irq thread
484  *	@irq:	the interrupt number
485  *
486  *	Handle interrupts which are nested into a threaded interrupt
487  *	handler. The handler function is called inside the calling
488  *	thread's context.
489  */
490 void handle_nested_irq(unsigned int irq)
491 {
492 	struct irq_desc *desc = irq_to_desc(irq);
493 	struct irqaction *action;
494 	irqreturn_t action_ret;
495 
496 	might_sleep();
497 
498 	raw_spin_lock_irq(&desc->lock);
499 
500 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
501 
502 	action = desc->action;
503 	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
504 		desc->istate |= IRQS_PENDING;
505 		raw_spin_unlock_irq(&desc->lock);
506 		return;
507 	}
508 
509 	kstat_incr_irqs_this_cpu(desc);
510 	atomic_inc(&desc->threads_active);
511 	raw_spin_unlock_irq(&desc->lock);
512 
513 	action_ret = IRQ_NONE;
514 	for_each_action_of_desc(desc, action)
515 		action_ret |= action->thread_fn(action->irq, action->dev_id);
516 
517 	if (!irq_settings_no_debug(desc))
518 		note_interrupt(desc, action_ret);
519 
520 	wake_threads_waitq(desc);
521 }
522 EXPORT_SYMBOL_GPL(handle_nested_irq);
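
/*
 * Editor's example (not part of the original file): a sketch of how a slow-bus
 * (e.g. I2C) GPIO expander demultiplexes child interrupts from its threaded
 * parent handler, as described above. my_chip, my_chip_read_pending() and the
 * ngpio/domain fields are hypothetical names used only for illustration.
 */
#if 0
static irqreturn_t my_expander_irq_thread(int irq, void *dev_id)
{
	struct my_chip *chip = dev_id;
	unsigned long pending = my_chip_read_pending(chip);	/* may sleep */
	int bit;

	for_each_set_bit(bit, &pending, chip->ngpio)
		handle_nested_irq(irq_find_mapping(chip->domain, bit));

	return IRQ_HANDLED;
}
#endif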
523 
524 static bool irq_check_poll(struct irq_desc *desc)
525 {
526 	if (!(desc->istate & IRQS_POLL_INPROGRESS))
527 		return false;
528 	return irq_wait_for_poll(desc);
529 }
530 
531 static bool irq_may_run(struct irq_desc *desc)
532 {
533 	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;
534 
535 	/*
536 	 * If the interrupt is not in progress and is not an armed
537 	 * wakeup interrupt, proceed.
538 	 */
539 	if (!irqd_has_set(&desc->irq_data, mask)) {
540 #ifdef CONFIG_PM_SLEEP
541 		if (unlikely(desc->no_suspend_depth &&
542 			     irqd_is_wakeup_set(&desc->irq_data))) {
543 			unsigned int irq = irq_desc_get_irq(desc);
544 			const char *name = "(unnamed)";
545 
546 			if (desc->action && desc->action->name)
547 				name = desc->action->name;
548 
549 			log_abnormal_wakeup_reason("misconfigured IRQ %u %s",
550 						   irq, name);
551 		}
552 #endif
553 		return true;
554 	}
555 
556 	/*
557 	 * If the interrupt is an armed wakeup source, mark it pending
558 	 * and suspended, disable it and notify the pm core about the
559 	 * event.
560 	 */
561 	if (irq_pm_check_wakeup(desc))
562 		return false;
563 
564 	/*
565 	 * Handle a potential concurrent poll on a different core.
566 	 */
567 	return irq_check_poll(desc);
568 }
569 
570 /**
571  *	handle_simple_irq - Simple and software-decoded IRQs.
572  *	@desc:	the interrupt description structure for this irq
573  *
574  *	Simple interrupts are either sent from a demultiplexing interrupt
575  *	handler or come from hardware, where no interrupt hardware control
576  *	is necessary.
577  *
578  *	Note: The caller is expected to handle the ack, clear, mask and
579  *	unmask issues if necessary.
580  */
581 void handle_simple_irq(struct irq_desc *desc)
582 {
583 	raw_spin_lock(&desc->lock);
584 
585 	if (!irq_may_run(desc))
586 		goto out_unlock;
587 
588 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
589 
590 	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
591 		desc->istate |= IRQS_PENDING;
592 		goto out_unlock;
593 	}
594 
595 	kstat_incr_irqs_this_cpu(desc);
596 	handle_irq_event(desc);
597 
598 out_unlock:
599 	raw_spin_unlock(&desc->lock);
600 }
601 EXPORT_SYMBOL_GPL(handle_simple_irq);
602 
603 /**
604  *	handle_untracked_irq - Simple and software-decoded IRQs.
605  *	@desc:	the interrupt description structure for this irq
606  *
607  *	Untracked interrupts are sent from a demultiplexing interrupt
608 	 *	handler when the demultiplexer does not know which device in its
609 	 *	multiplexed irq domain generated the interrupt. IRQs handled
610  *	through here are not subjected to stats tracking, randomness, or
611  *	spurious interrupt detection.
612  *
613  *	Note: Like handle_simple_irq, the caller is expected to handle
614  *	the ack, clear, mask and unmask issues if necessary.
615  */
616 void handle_untracked_irq(struct irq_desc *desc)
617 {
618 	raw_spin_lock(&desc->lock);
619 
620 	if (!irq_may_run(desc))
621 		goto out_unlock;
622 
623 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
624 
625 	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
626 		desc->istate |= IRQS_PENDING;
627 		goto out_unlock;
628 	}
629 
630 	desc->istate &= ~IRQS_PENDING;
631 	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
632 	raw_spin_unlock(&desc->lock);
633 
634 	__handle_irq_event_percpu(desc);
635 
636 	raw_spin_lock(&desc->lock);
637 	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
638 
639 out_unlock:
640 	raw_spin_unlock(&desc->lock);
641 }
642 EXPORT_SYMBOL_GPL(handle_untracked_irq);
643 
644 /*
645  * Called unconditionally from handle_level_irq() and only for oneshot
646  * interrupts from handle_fasteoi_irq()
647  */
648 static void cond_unmask_irq(struct irq_desc *desc)
649 {
650 	/*
651 	 * We need to unmask in the following cases:
652 	 * - Standard level irq (IRQF_ONESHOT is not set)
653 	 * - Oneshot irq which did not wake the thread (caused by a
654 	 *   spurious interrupt or a primary handler handling it
655 	 *   completely).
656 	 */
657 	if (!irqd_irq_disabled(&desc->irq_data) &&
658 	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
659 		unmask_irq(desc);
660 }
661 
662 /**
663  *	handle_level_irq - Level type irq handler
664  *	@desc:	the interrupt description structure for this irq
665  *
666  *	Level type interrupts are active as long as the hardware line has
667  *	the active level. This may require masking the interrupt and unmasking
668  *	it after the associated handler has acknowledged the device, so the
669  *	interrupt line is back to inactive.
670  */
671 void handle_level_irq(struct irq_desc *desc)
672 {
673 	raw_spin_lock(&desc->lock);
674 	mask_ack_irq(desc);
675 
676 	if (!irq_may_run(desc))
677 		goto out_unlock;
678 
679 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
680 
681 	/*
682 	 * If it's disabled or no action is available,
683 	 * keep it masked and get out of here
684 	 */
685 	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
686 		desc->istate |= IRQS_PENDING;
687 		goto out_unlock;
688 	}
689 
690 	kstat_incr_irqs_this_cpu(desc);
691 	handle_irq_event(desc);
692 
693 	cond_unmask_irq(desc);
694 
695 out_unlock:
696 	raw_spin_unlock(&desc->lock);
697 }
698 EXPORT_SYMBOL_GPL(handle_level_irq);
699 
700 static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
701 {
702 	if (!(desc->istate & IRQS_ONESHOT)) {
703 		chip->irq_eoi(&desc->irq_data);
704 		return;
705 	}
706 	/*
707 	 * We need to unmask in the following cases:
708 	 * - Oneshot irq which did not wake the thread (caused by a
709 	 *   spurious interrupt or a primary handler handling it
710 	 *   completely).
711 	 */
712 	if (!irqd_irq_disabled(&desc->irq_data) &&
713 	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
714 		chip->irq_eoi(&desc->irq_data);
715 		unmask_irq(desc);
716 	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
717 		chip->irq_eoi(&desc->irq_data);
718 	}
719 }
720 
721 /**
722  *	handle_fasteoi_irq - irq handler for transparent controllers
723  *	@desc:	the interrupt description structure for this irq
724  *
725  *	Only a single callback will be issued to the chip: an ->eoi()
726  *	call when the interrupt has been serviced. This enables support
727  *	for modern forms of interrupt handlers, which handle the flow
728  *	details in hardware, transparently.
729  */
730 void handle_fasteoi_irq(struct irq_desc *desc)
731 {
732 	struct irq_chip *chip = desc->irq_data.chip;
733 
734 	raw_spin_lock(&desc->lock);
735 
736 	/*
737 	 * When an affinity change races with IRQ handling, the next interrupt
738 	 * can arrive on the new CPU before the original CPU has completed
739 	 * handling the previous one - it may need to be resent.
740 	 */
741 	if (!irq_may_run(desc)) {
742 		if (irqd_needs_resend_when_in_progress(&desc->irq_data))
743 			desc->istate |= IRQS_PENDING;
744 		goto out;
745 	}
746 
747 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
748 
749 	/*
750 	 * If it's disabled or no action is available
751 	 * then mask it and get out of here:
752 	 */
753 	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
754 		desc->istate |= IRQS_PENDING;
755 		mask_irq(desc);
756 		goto out;
757 	}
758 
759 	kstat_incr_irqs_this_cpu(desc);
760 	if (desc->istate & IRQS_ONESHOT)
761 		mask_irq(desc);
762 
763 	handle_irq_event(desc);
764 
765 	cond_unmask_eoi_irq(desc, chip);
766 
767 	/*
768 	 * When the race described above happens this will resend the interrupt.
769 	 */
770 	if (unlikely(desc->istate & IRQS_PENDING))
771 		check_irq_resend(desc, false);
772 
773 	raw_spin_unlock(&desc->lock);
774 	return;
775 out:
776 	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
777 		chip->irq_eoi(&desc->irq_data);
778 	raw_spin_unlock(&desc->lock);
779 }
780 EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
781 
782 /**
783  *	handle_fasteoi_nmi - irq handler for NMI interrupt lines
784  *	@desc:	the interrupt description structure for this irq
785  *
786  *	A simple NMI-safe handler, considering the restrictions
787  *	from request_nmi.
788  *
789  *	Only a single callback will be issued to the chip: an ->eoi()
790  *	call when the interrupt has been serviced. This enables support
791  *	for modern forms of interrupt handlers, which handle the flow
792  *	details in hardware, transparently.
793  */
794 void handle_fasteoi_nmi(struct irq_desc *desc)
795 {
796 	struct irq_chip *chip = irq_desc_get_chip(desc);
797 	struct irqaction *action = desc->action;
798 	unsigned int irq = irq_desc_get_irq(desc);
799 	irqreturn_t res;
800 
801 	__kstat_incr_irqs_this_cpu(desc);
802 
803 	trace_irq_handler_entry(irq, action);
804 	/*
805 	 * NMIs cannot be shared, there is only one action.
806 	 */
807 	res = action->handler(irq, action->dev_id);
808 	trace_irq_handler_exit(irq, action, res);
809 
810 	if (chip->irq_eoi)
811 		chip->irq_eoi(&desc->irq_data);
812 }
813 EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);
814 
815 /**
816  *	handle_edge_irq - edge type IRQ handler
817  *	@desc:	the interrupt description structure for this irq
818  *
819  *	Interrupt occurs on the falling and/or rising edge of a hardware
820  *	signal. The occurrence is latched into the irq controller hardware
821  *	and must be acked in order to be reenabled. After the ack another
822  *	interrupt can happen on the same source even before the first one
823  *	is handled by the associated event handler. If this happens it
824  *	might be necessary to disable (mask) the interrupt depending on the
825 	 *	controller hardware. This requires reenabling the interrupt inside
826 	 *	the loop which handles the interrupts which have arrived while
827  *	the handler was running. If all pending interrupts are handled, the
828  *	loop is left.
829  */
830 void handle_edge_irq(struct irq_desc *desc)
831 {
832 	raw_spin_lock(&desc->lock);
833 
834 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
835 
836 	if (!irq_may_run(desc)) {
837 		desc->istate |= IRQS_PENDING;
838 		mask_ack_irq(desc);
839 		goto out_unlock;
840 	}
841 
842 	/*
843 	 * If it's disabled or no action is available then mask it and get
844 	 * out of here.
845 	 */
846 	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
847 		desc->istate |= IRQS_PENDING;
848 		mask_ack_irq(desc);
849 		goto out_unlock;
850 	}
851 
852 	kstat_incr_irqs_this_cpu(desc);
853 
854 	/* Start handling the irq */
855 	desc->irq_data.chip->irq_ack(&desc->irq_data);
856 
857 	do {
858 		if (unlikely(!desc->action)) {
859 			mask_irq(desc);
860 			goto out_unlock;
861 		}
862 
863 		/*
864 		 * When another irq arrived while we were handling
865 		 * one, we could have masked the irq.
866 		 * Reenable it, if it was not disabled in the meantime.
867 		 */
868 		if (unlikely(desc->istate & IRQS_PENDING)) {
869 			if (!irqd_irq_disabled(&desc->irq_data) &&
870 			    irqd_irq_masked(&desc->irq_data))
871 				unmask_irq(desc);
872 		}
873 
874 		handle_irq_event(desc);
875 
876 	} while ((desc->istate & IRQS_PENDING) &&
877 		 !irqd_irq_disabled(&desc->irq_data));
878 
879 out_unlock:
880 	raw_spin_unlock(&desc->lock);
881 }
882 EXPORT_SYMBOL(handle_edge_irq);
883 
884 #ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
885 /**
886  *	handle_edge_eoi_irq - edge eoi type IRQ handler
887  *	@desc:	the interrupt description structure for this irq
888  *
889  * Similar to handle_edge_irq above, but using eoi and without the
890  * mask/unmask logic.
891  */
892 void handle_edge_eoi_irq(struct irq_desc *desc)
893 {
894 	struct irq_chip *chip = irq_desc_get_chip(desc);
895 
896 	raw_spin_lock(&desc->lock);
897 
898 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
899 
900 	if (!irq_may_run(desc)) {
901 		desc->istate |= IRQS_PENDING;
902 		goto out_eoi;
903 	}
904 
905 	/*
906 	 * If it's disabled or no action is available then mask it and get
907 	 * out of here.
908 	 */
909 	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
910 		desc->istate |= IRQS_PENDING;
911 		goto out_eoi;
912 	}
913 
914 	kstat_incr_irqs_this_cpu(desc);
915 
916 	do {
917 		if (unlikely(!desc->action))
918 			goto out_eoi;
919 
920 		handle_irq_event(desc);
921 
922 	} while ((desc->istate & IRQS_PENDING) &&
923 		 !irqd_irq_disabled(&desc->irq_data));
924 
925 out_eoi:
926 	chip->irq_eoi(&desc->irq_data);
927 	raw_spin_unlock(&desc->lock);
928 }
929 #endif
930 
931 /**
932  *	handle_percpu_irq - Per CPU local irq handler
933  *	@desc:	the interrupt description structure for this irq
934  *
935  *	Per CPU interrupts on SMP machines without locking requirements
936  */
937 void handle_percpu_irq(struct irq_desc *desc)
938 {
939 	struct irq_chip *chip = irq_desc_get_chip(desc);
940 
941 	/*
942 	 * PER CPU interrupts are not serialized. Do not touch
943 	 * desc->tot_count.
944 	 */
945 	__kstat_incr_irqs_this_cpu(desc);
946 
947 	if (chip->irq_ack)
948 		chip->irq_ack(&desc->irq_data);
949 
950 	handle_irq_event_percpu(desc);
951 
952 	if (chip->irq_eoi)
953 		chip->irq_eoi(&desc->irq_data);
954 }
955 
956 /**
957  * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
958  * @desc:	the interrupt description structure for this irq
959  *
960  * Per CPU interrupts on SMP machines without locking requirements. Same as
961  * handle_percpu_irq() above but with the following extras:
962  *
963  * action->percpu_dev_id is a pointer to percpu variables which
964  * contain the real device id for the cpu on which this handler is
965  * called
966  */
967 void handle_percpu_devid_irq(struct irq_desc *desc)
968 {
969 	struct irq_chip *chip = irq_desc_get_chip(desc);
970 	struct irqaction *action = desc->action;
971 	unsigned int irq = irq_desc_get_irq(desc);
972 	irqreturn_t res;
973 
974 	/*
975 	 * PER CPU interrupts are not serialized. Do not touch
976 	 * desc->tot_count.
977 	 */
978 	__kstat_incr_irqs_this_cpu(desc);
979 
980 	if (chip->irq_ack)
981 		chip->irq_ack(&desc->irq_data);
982 
983 	if (likely(action)) {
984 		trace_irq_handler_entry(irq, action);
985 		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
986 		trace_irq_handler_exit(irq, action, res);
987 	} else {
988 		unsigned int cpu = smp_processor_id();
989 		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
990 
991 		if (enabled)
992 			irq_percpu_disable(desc, cpu);
993 
994 		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
995 			    enabled ? " and unmasked" : "", irq, cpu);
996 	}
997 
998 	if (chip->irq_eoi)
999 		chip->irq_eoi(&desc->irq_data);
1000 }
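
/*
 * Editor's example (not part of the original file): per-CPU dev_id interrupts
 * handled by the flow handler above are requested with request_percpu_irq()
 * and then enabled on each CPU. my_percpu_handler and my_pcpu_data are
 * hypothetical; my_pcpu_data must be a __percpu pointer.
 */
#if 0
	ret = request_percpu_irq(irq, my_percpu_handler, "my-timer", my_pcpu_data);
	if (!ret)
		enable_percpu_irq(irq, IRQ_TYPE_NONE);	/* run on each CPU */
#endif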
1001 
1002 /**
1003  * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
1004  *				     dev ids
1005  * @desc:	the interrupt description structure for this irq
1006  *
1007  * Similar to handle_fasteoi_nmi, but handling the dev_id cookie
1008  * as a percpu pointer.
1009  */
1010 void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
1011 {
1012 	struct irq_chip *chip = irq_desc_get_chip(desc);
1013 	struct irqaction *action = desc->action;
1014 	unsigned int irq = irq_desc_get_irq(desc);
1015 	irqreturn_t res;
1016 
1017 	__kstat_incr_irqs_this_cpu(desc);
1018 
1019 	trace_irq_handler_entry(irq, action);
1020 	res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
1021 	trace_irq_handler_exit(irq, action, res);
1022 
1023 	if (chip->irq_eoi)
1024 		chip->irq_eoi(&desc->irq_data);
1025 }
1026 
1027 static void
1028 __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
1029 		     int is_chained, const char *name)
1030 {
1031 	if (!handle) {
1032 		handle = handle_bad_irq;
1033 	} else {
1034 		struct irq_data *irq_data = &desc->irq_data;
1035 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1036 		/*
1037 		 * With hierarchical domains we might run into a
1038 		 * situation where the outermost chip is not yet set
1039 		 * up, but the inner chips are there.  Instead of
1040 		 * bailing we install the handler, but obviously we
1041 		 * cannot enable/startup the interrupt at this point.
1042 		 */
1043 		while (irq_data) {
1044 			if (irq_data->chip != &no_irq_chip)
1045 				break;
1046 			/*
1047 			 * Bail out if the outer chip is not set up
1048 			 * and the interrupt is supposed to be started
1049 			 * right away.
1050 			 */
1051 			if (WARN_ON(is_chained))
1052 				return;
1053 			/* Try the parent */
1054 			irq_data = irq_data->parent_data;
1055 		}
1056 #endif
1057 		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
1058 			return;
1059 	}
1060 
1061 	/* Uninstall? */
1062 	if (handle == handle_bad_irq) {
1063 		if (desc->irq_data.chip != &no_irq_chip)
1064 			mask_ack_irq(desc);
1065 		irq_state_set_disabled(desc);
1066 		if (is_chained) {
1067 			desc->action = NULL;
1068 			WARN_ON(irq_chip_pm_put(irq_desc_get_irq_data(desc)));
1069 		}
1070 		desc->depth = 1;
1071 	}
1072 	desc->handle_irq = handle;
1073 	desc->name = name;
1074 
1075 	if (handle != handle_bad_irq && is_chained) {
1076 		unsigned int type = irqd_get_trigger_type(&desc->irq_data);
1077 
1078 		/*
1079 		 * We're about to start this interrupt immediately,
1080 		 * hence the need to set the trigger configuration.
1081 		 * But the .set_type callback may have overridden the
1082 		 * flow handler, ignoring that we're dealing with a
1083 		 * chained interrupt. Reset it immediately because we
1084 		 * do know better.
1085 		 */
1086 		if (type != IRQ_TYPE_NONE) {
1087 			__irq_set_trigger(desc, type);
1088 			desc->handle_irq = handle;
1089 		}
1090 
1091 		irq_settings_set_noprobe(desc);
1092 		irq_settings_set_norequest(desc);
1093 		irq_settings_set_nothread(desc);
1094 		desc->action = &chained_action;
1095 		WARN_ON(irq_chip_pm_get(irq_desc_get_irq_data(desc)));
1096 		irq_activate_and_startup(desc, IRQ_RESEND);
1097 	}
1098 }
1099 
1100 void
1101 __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
1102 		  const char *name)
1103 {
1104 	unsigned long flags;
1105 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
1106 
1107 	if (!desc)
1108 		return;
1109 
1110 	__irq_do_set_handler(desc, handle, is_chained, name);
1111 	irq_put_desc_busunlock(desc, flags);
1112 }
1113 EXPORT_SYMBOL_GPL(__irq_set_handler);
1114 
1115 void
1116 irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
1117 				 void *data)
1118 {
1119 	unsigned long flags;
1120 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
1121 
1122 	if (!desc)
1123 		return;
1124 
1125 	desc->irq_common_data.handler_data = data;
1126 	__irq_do_set_handler(desc, handle, 1, NULL);
1127 
1128 	irq_put_desc_busunlock(desc, flags);
1129 }
1130 EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
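
/*
 * Editor's example (not part of the original file): a typical chained flow
 * handler installed with irq_set_chained_handler_and_data(). my_mux,
 * my_mux_pending() and the nr_irqs/domain fields are hypothetical;
 * chained_irq_enter()/chained_irq_exit() come from
 * include/linux/irqchip/chained_irq.h.
 */
#if 0
static void my_mux_handle_irq(struct irq_desc *desc)
{
	struct my_mux *mux = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long pending;
	int hwirq;

	chained_irq_enter(chip, desc);
	pending = my_mux_pending(mux);
	for_each_set_bit(hwirq, &pending, mux->nr_irqs)
		generic_handle_domain_irq(mux->domain, hwirq);
	chained_irq_exit(chip, desc);
}

	/* at probe time */
	irq_set_chained_handler_and_data(parent_irq, my_mux_handle_irq, mux);
#endif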
1131 
1132 void
1133 irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip,
1134 			      irq_flow_handler_t handle, const char *name)
1135 {
1136 	irq_set_chip(irq, chip);
1137 	__irq_set_handler(irq, handle, 0, name);
1138 }
1139 EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
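
/*
 * Editor's example (not part of the original file): wiring a chip and flow
 * handler from an irq_domain ->map() callback. my_irq_chip is hypothetical;
 * handle_level_irq() is the flow handler defined earlier in this file.
 */
#if 0
static int my_irq_domain_map(struct irq_domain *d, unsigned int virq,
			     irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler_name(virq, &my_irq_chip, handle_level_irq,
				      "my-gpio");
	irq_set_chip_data(virq, d->host_data);
	return 0;
}
#endif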
1140 
1141 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
1142 {
1143 	unsigned long flags, trigger, tmp;
1144 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
1145 
1146 	if (!desc)
1147 		return;
1148 
1149 	/*
1150 	 * Warn when a driver sets the no autoenable flag on an already
1151 	 * active interrupt.
1152 	 */
1153 	WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));
1154 
1155 	irq_settings_clr_and_set(desc, clr, set);
1156 
1157 	trigger = irqd_get_trigger_type(&desc->irq_data);
1158 
1159 	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
1160 		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
1161 	if (irq_settings_has_no_balance_set(desc))
1162 		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1163 	if (irq_settings_is_per_cpu(desc))
1164 		irqd_set(&desc->irq_data, IRQD_PER_CPU);
1165 	if (irq_settings_can_move_pcntxt(desc))
1166 		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
1167 	if (irq_settings_is_level(desc))
1168 		irqd_set(&desc->irq_data, IRQD_LEVEL);
1169 
1170 	tmp = irq_settings_get_trigger_mask(desc);
1171 	if (tmp != IRQ_TYPE_NONE)
1172 		trigger = tmp;
1173 
1174 	irqd_set(&desc->irq_data, trigger);
1175 
1176 	irq_put_desc_unlock(desc, flags);
1177 }
1178 EXPORT_SYMBOL_GPL(irq_modify_status);
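
/*
 * Editor's example (not part of the original file): a driver that wants an
 * interrupt to stay disabled until explicitly enabled and to be excluded from
 * autoprobing could set status bits like this. "my_irq" is hypothetical;
 * irq_set_status_flags()/irq_clear_status_flags() are thin wrappers around
 * irq_modify_status().
 */
#if 0
	irq_modify_status(my_irq, 0, IRQ_NOAUTOEN | IRQ_NOPROBE);
#endif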
1179 
1180 #ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
1181 /**
1182  *	irq_cpu_online - Invoke all irq_cpu_online functions.
1183  *
1184  *	Iterate through all irqs and invoke the chip.irq_cpu_online()
1185  *	for each.
1186  */
1187 void irq_cpu_online(void)
1188 {
1189 	struct irq_desc *desc;
1190 	struct irq_chip *chip;
1191 	unsigned long flags;
1192 	unsigned int irq;
1193 
1194 	for_each_active_irq(irq) {
1195 		desc = irq_to_desc(irq);
1196 		if (!desc)
1197 			continue;
1198 
1199 		raw_spin_lock_irqsave(&desc->lock, flags);
1200 
1201 		chip = irq_data_get_irq_chip(&desc->irq_data);
1202 		if (chip && chip->irq_cpu_online &&
1203 		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
1204 		     !irqd_irq_disabled(&desc->irq_data)))
1205 			chip->irq_cpu_online(&desc->irq_data);
1206 
1207 		raw_spin_unlock_irqrestore(&desc->lock, flags);
1208 	}
1209 }
1210 
1211 /**
1212  *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
1213  *
1214  *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
1215  *	for each.
1216  */
1217 void irq_cpu_offline(void)
1218 {
1219 	struct irq_desc *desc;
1220 	struct irq_chip *chip;
1221 	unsigned long flags;
1222 	unsigned int irq;
1223 
1224 	for_each_active_irq(irq) {
1225 		desc = irq_to_desc(irq);
1226 		if (!desc)
1227 			continue;
1228 
1229 		raw_spin_lock_irqsave(&desc->lock, flags);
1230 
1231 		chip = irq_data_get_irq_chip(&desc->irq_data);
1232 		if (chip && chip->irq_cpu_offline &&
1233 		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
1234 		     !irqd_irq_disabled(&desc->irq_data)))
1235 			chip->irq_cpu_offline(&desc->irq_data);
1236 
1237 		raw_spin_unlock_irqrestore(&desc->lock, flags);
1238 	}
1239 }
1240 #endif
1241 
1242 #ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
1243 
1244 #ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
1245 /**
1246  *	handle_fasteoi_ack_irq - irq handler for edge hierarchy
1247  *	stacked on transparent controllers
1248  *
1249  *	@desc:	the interrupt description structure for this irq
1250  *
1251  *	Like handle_fasteoi_irq(), but for use with hierarchy where
1252  *	the irq_chip also needs to have its ->irq_ack() function
1253  *	called.
1254  */
1255 void handle_fasteoi_ack_irq(struct irq_desc *desc)
1256 {
1257 	struct irq_chip *chip = desc->irq_data.chip;
1258 
1259 	raw_spin_lock(&desc->lock);
1260 
1261 	if (!irq_may_run(desc))
1262 		goto out;
1263 
1264 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
1265 
1266 	/*
1267 	 * If it's disabled or no action is available
1268 	 * then mask it and get out of here:
1269 	 */
1270 	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
1271 		desc->istate |= IRQS_PENDING;
1272 		mask_irq(desc);
1273 		goto out;
1274 	}
1275 
1276 	kstat_incr_irqs_this_cpu(desc);
1277 	if (desc->istate & IRQS_ONESHOT)
1278 		mask_irq(desc);
1279 
1280 	/* Start handling the irq */
1281 	desc->irq_data.chip->irq_ack(&desc->irq_data);
1282 
1283 	handle_irq_event(desc);
1284 
1285 	cond_unmask_eoi_irq(desc, chip);
1286 
1287 	raw_spin_unlock(&desc->lock);
1288 	return;
1289 out:
1290 	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
1291 		chip->irq_eoi(&desc->irq_data);
1292 	raw_spin_unlock(&desc->lock);
1293 }
1294 EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);
1295 
1296 /**
1297  *	handle_fasteoi_mask_irq - irq handler for level hierarchy
1298  *	stacked on transparent controllers
1299  *
1300  *	@desc:	the interrupt description structure for this irq
1301  *
1302  *	Like handle_fasteoi_irq(), but for use with hierarchy where
1303  *	the irq_chip also needs to have its ->irq_mask_ack() function
1304  *	called.
1305  */
1306 void handle_fasteoi_mask_irq(struct irq_desc *desc)
1307 {
1308 	struct irq_chip *chip = desc->irq_data.chip;
1309 
1310 	raw_spin_lock(&desc->lock);
1311 	mask_ack_irq(desc);
1312 
1313 	if (!irq_may_run(desc))
1314 		goto out;
1315 
1316 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
1317 
1318 	/*
1319 	 * If it's disabled or no action is available
1320 	 * then mask it and get out of here:
1321 	 */
1322 	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
1323 		desc->istate |= IRQS_PENDING;
1324 		mask_irq(desc);
1325 		goto out;
1326 	}
1327 
1328 	kstat_incr_irqs_this_cpu(desc);
1329 	if (desc->istate & IRQS_ONESHOT)
1330 		mask_irq(desc);
1331 
1332 	handle_irq_event(desc);
1333 
1334 	cond_unmask_eoi_irq(desc, chip);
1335 
1336 	raw_spin_unlock(&desc->lock);
1337 	return;
1338 out:
1339 	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
1340 		chip->irq_eoi(&desc->irq_data);
1341 	raw_spin_unlock(&desc->lock);
1342 }
1343 EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);
1344 
1345 #endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */
1346 
1347 /**
1348  * irq_chip_set_parent_state - set the state of a parent interrupt.
1349  *
1350  * @data: Pointer to interrupt specific data
1351  * @which: State to be restored (one of IRQCHIP_STATE_*)
1352  * @val: Value corresponding to @which
1353  *
1354  * Conditional success: returns 0 if the underlying irqchip does not implement it.
1355  */
1356 int irq_chip_set_parent_state(struct irq_data *data,
1357 			      enum irqchip_irq_state which,
1358 			      bool val)
1359 {
1360 	data = data->parent_data;
1361 
1362 	if (!data || !data->chip->irq_set_irqchip_state)
1363 		return 0;
1364 
1365 	return data->chip->irq_set_irqchip_state(data, which, val);
1366 }
1367 EXPORT_SYMBOL_GPL(irq_chip_set_parent_state);
1368 
1369 /**
1370  * irq_chip_get_parent_state - get the state of a parent interrupt.
1371  *
1372  * @data: Pointer to interrupt specific data
1373  * @which: one of IRQCHIP_STATE_* the caller wants to know
1374  * @state: a pointer to a boolean where the state is to be stored
1375  *
1376  * Conditional success: returns 0 if the underlying irqchip does not implement it.
1377  */
1378 int irq_chip_get_parent_state(struct irq_data *data,
1379 			      enum irqchip_irq_state which,
1380 			      bool *state)
1381 {
1382 	data = data->parent_data;
1383 
1384 	if (!data || !data->chip->irq_get_irqchip_state)
1385 		return 0;
1386 
1387 	return data->chip->irq_get_irqchip_state(data, which, state);
1388 }
1389 EXPORT_SYMBOL_GPL(irq_chip_get_parent_state);
1390 
1391 /**
1392  * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
1393  * NULL)
1394  * @data:	Pointer to interrupt specific data
1395  */
1396 void irq_chip_enable_parent(struct irq_data *data)
1397 {
1398 	data = data->parent_data;
1399 	if (data->chip->irq_enable)
1400 		data->chip->irq_enable(data);
1401 	else
1402 		data->chip->irq_unmask(data);
1403 }
1404 EXPORT_SYMBOL_GPL(irq_chip_enable_parent);
1405 
1406 /**
1407  * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
1408  * NULL)
1409  * @data:	Pointer to interrupt specific data
1410  */
1411 void irq_chip_disable_parent(struct irq_data *data)
1412 {
1413 	data = data->parent_data;
1414 	if (data->chip->irq_disable)
1415 		data->chip->irq_disable(data);
1416 	else
1417 		data->chip->irq_mask(data);
1418 }
1419 EXPORT_SYMBOL_GPL(irq_chip_disable_parent);
1420 
1421 /**
1422  * irq_chip_ack_parent - Acknowledge the parent interrupt
1423  * @data:	Pointer to interrupt specific data
1424  */
1425 void irq_chip_ack_parent(struct irq_data *data)
1426 {
1427 	data = data->parent_data;
1428 	data->chip->irq_ack(data);
1429 }
1430 EXPORT_SYMBOL_GPL(irq_chip_ack_parent);
1431 
1432 /**
1433  * irq_chip_mask_parent - Mask the parent interrupt
1434  * @data:	Pointer to interrupt specific data
1435  */
1436 void irq_chip_mask_parent(struct irq_data *data)
1437 {
1438 	data = data->parent_data;
1439 	data->chip->irq_mask(data);
1440 }
1441 EXPORT_SYMBOL_GPL(irq_chip_mask_parent);
1442 
1443 /**
1444  * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt
1445  * @data:	Pointer to interrupt specific data
1446  */
1447 void irq_chip_mask_ack_parent(struct irq_data *data)
1448 {
1449 	data = data->parent_data;
1450 	data->chip->irq_mask_ack(data);
1451 }
1452 EXPORT_SYMBOL_GPL(irq_chip_mask_ack_parent);
1453 
1454 /**
1455  * irq_chip_unmask_parent - Unmask the parent interrupt
1456  * @data:	Pointer to interrupt specific data
1457  */
1458 void irq_chip_unmask_parent(struct irq_data *data)
1459 {
1460 	data = data->parent_data;
1461 	data->chip->irq_unmask(data);
1462 }
1463 EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);
1464 
1465 /**
1466  * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
1467  * @data:	Pointer to interrupt specific data
1468  */
1469 void irq_chip_eoi_parent(struct irq_data *data)
1470 {
1471 	data = data->parent_data;
1472 	data->chip->irq_eoi(data);
1473 }
1474 EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);
1475 
1476 /**
1477  * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
1478  * @data:	Pointer to interrupt specific data
1479  * @dest:	The affinity mask to set
1480  * @force:	Flag to enforce setting (disable online checks)
1481  *
1482  * Conditional, as the underlying parent chip might not implement it.
1483  */
1484 int irq_chip_set_affinity_parent(struct irq_data *data,
1485 				 const struct cpumask *dest, bool force)
1486 {
1487 	data = data->parent_data;
1488 	if (data->chip->irq_set_affinity)
1489 		return data->chip->irq_set_affinity(data, dest, force);
1490 
1491 	return -ENOSYS;
1492 }
1493 EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);
1494 
1495 /**
1496  * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
1497  * @data:	Pointer to interrupt specific data
1498  * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
1499  *
1500  * Conditional, as the underlying parent chip might not implement it.
1501  */
1502 int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
1503 {
1504 	data = data->parent_data;
1505 
1506 	if (data->chip->irq_set_type)
1507 		return data->chip->irq_set_type(data, type);
1508 
1509 	return -ENOSYS;
1510 }
1511 EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);
1512 
1513 /**
1514  * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
1515  * @data:	Pointer to interrupt specific data
1516  *
1517  * Iterate through the domain hierarchy of the interrupt and check
1518  * whether a hw retrigger function exists. If yes, invoke it.
1519  */
1520 int irq_chip_retrigger_hierarchy(struct irq_data *data)
1521 {
1522 	for (data = data->parent_data; data; data = data->parent_data)
1523 		if (data->chip && data->chip->irq_retrigger)
1524 			return data->chip->irq_retrigger(data);
1525 
1526 	return 0;
1527 }
1528 EXPORT_SYMBOL_GPL(irq_chip_retrigger_hierarchy);
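
/*
 * Editor's example (not part of the original file): an irq_chip for a child
 * domain in a hierarchy commonly forwards most operations to its parent with
 * the helpers above. my_stacked_chip is a hypothetical name.
 */
#if 0
static struct irq_chip my_stacked_chip = {
	.name			= "my-stack",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
};
#endif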
1529 
1530 /**
1531  * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
1532  * @data:	Pointer to interrupt specific data
1533  * @vcpu_info:	The vcpu affinity information
1534  */
1535 int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
1536 {
1537 	data = data->parent_data;
1538 	if (data->chip->irq_set_vcpu_affinity)
1539 		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);
1540 
1541 	return -ENOSYS;
1542 }
1543 EXPORT_SYMBOL_GPL(irq_chip_set_vcpu_affinity_parent);
1544 /**
1545  * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
1546  * @data:	Pointer to interrupt specific data
1547  * @on:		Whether to set or reset the wake-up capability of this irq
1548  *
1549  * Conditional, as the underlying parent chip might not implement it.
1550  */
1551 int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
1552 {
1553 	data = data->parent_data;
1554 
1555 	if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
1556 		return 0;
1557 
1558 	if (data->chip->irq_set_wake)
1559 		return data->chip->irq_set_wake(data, on);
1560 
1561 	return -ENOSYS;
1562 }
1563 EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent);
1564 
1565 /**
1566  * irq_chip_request_resources_parent - Request resources on the parent interrupt
1567  * @data:	Pointer to interrupt specific data
1568  */
1569 int irq_chip_request_resources_parent(struct irq_data *data)
1570 {
1571 	data = data->parent_data;
1572 
1573 	if (data->chip->irq_request_resources)
1574 		return data->chip->irq_request_resources(data);
1575 
1576 	/* no error on missing optional irq_chip::irq_request_resources */
1577 	return 0;
1578 }
1579 EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent);
1580 
1581 /**
1582  * irq_chip_release_resources_parent - Release resources on the parent interrupt
1583  * @data:	Pointer to interrupt specific data
1584  */
1585 void irq_chip_release_resources_parent(struct irq_data *data)
1586 {
1587 	data = data->parent_data;
1588 	if (data->chip->irq_release_resources)
1589 		data->chip->irq_release_resources(data);
1590 }
1591 EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent);
1592 #endif
1593 
1594 /**
1595  * irq_chip_compose_msi_msg - Compose msi message for a irq chip
1596  * @data:	Pointer to interrupt specific data
1597  * @msg:	Pointer to the MSI message
1598  *
1599  * For hierarchical domains we find the first chip in the hierarchy
1600  * which implements the irq_compose_msi_msg callback. For non
1601  * hierarchical we use the top level chip.
1602  */
1603 int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1604 {
1605 	struct irq_data *pos;
1606 
1607 	for (pos = NULL; !pos && data; data = irqd_get_parent_data(data)) {
1608 		if (data->chip && data->chip->irq_compose_msi_msg)
1609 			pos = data;
1610 	}
1611 
1612 	if (!pos)
1613 		return -ENOSYS;
1614 
1615 	pos->chip->irq_compose_msi_msg(pos, msg);
1616 	return 0;
1617 }
1618 
1619 static struct device *irq_get_pm_device(struct irq_data *data)
1620 {
1621 	if (data->domain)
1622 		return data->domain->pm_dev;
1623 
1624 	return NULL;
1625 }
1626 
1627 /**
1628  * irq_chip_pm_get - Enable power for an IRQ chip
1629  * @data:	Pointer to interrupt specific data
1630  *
1631  * Enable the power to the IRQ chip referenced by the interrupt data
1632  * structure.
1633  */
1634 int irq_chip_pm_get(struct irq_data *data)
1635 {
1636 	struct device *dev = irq_get_pm_device(data);
1637 	int retval = 0;
1638 
1639 	if (IS_ENABLED(CONFIG_PM) && dev)
1640 		retval = pm_runtime_resume_and_get(dev);
1641 
1642 	return retval;
1643 }
1644 
1645 /**
1646  * irq_chip_pm_put - Disable power for an IRQ chip
1647  * @data:	Pointer to interrupt specific data
1648  *
1649  * Disable the power to the IRQ chip referenced by the interrupt data
1650  * structure. Note that power will only be disabled once this
1651  * function has been called for all IRQs that have called irq_chip_pm_get().
1652  */
1653 int irq_chip_pm_put(struct irq_data *data)
1654 {
1655 	struct device *dev = irq_get_pm_device(data);
1656 	int retval = 0;
1657 
1658 	if (IS_ENABLED(CONFIG_PM) && dev)
1659 		retval = pm_runtime_put(dev);
1660 
1661 	return (retval < 0) ? retval : 0;
1662 }
1663