1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
4  * Copyright (C) 2005-2006 Thomas Gleixner
5  *
6  * This file contains driver APIs to the irq subsystem.
7  */
8 
9 #define pr_fmt(fmt) "genirq: " fmt
10 
11 #include <linux/irq.h>
12 #include <linux/kthread.h>
13 #include <linux/module.h>
14 #include <linux/random.h>
15 #include <linux/interrupt.h>
16 #include <linux/irqdomain.h>
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/sched/rt.h>
20 #include <linux/sched/task.h>
21 #include <uapi/linux/sched/types.h>
22 #include <linux/task_work.h>
23 
24 #include "internals.h"
25 
26 #ifdef CONFIG_IRQ_FORCED_THREADING
27 __read_mostly bool force_irqthreads;
28 EXPORT_SYMBOL_GPL(force_irqthreads);
29 
30 static int __init setup_forced_irqthreads(char *arg)
31 {
32 	force_irqthreads = true;
33 	return 0;
34 }
35 early_param("threadirqs", setup_forced_irqthreads);
36 #endif
37 
38 static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
39 {
40 	struct irq_data *irqd = irq_desc_get_irq_data(desc);
41 	bool inprogress;
42 
43 	do {
44 		unsigned long flags;
45 
46 		/*
47 		 * Wait until we're out of the critical section.  This might
48 		 * give the wrong answer due to the lack of memory barriers.
49 		 */
50 		while (irqd_irq_inprogress(&desc->irq_data))
51 			cpu_relax();
52 
53 		/* Ok, that indicated we're done: double-check carefully. */
54 		raw_spin_lock_irqsave(&desc->lock, flags);
55 		inprogress = irqd_irq_inprogress(&desc->irq_data);
56 
57 		/*
58 		 * If requested and supported, check at the chip whether it
59 		 * is in flight at the hardware level, i.e. already pending
60 		 * in a CPU and waiting for service and acknowledge.
61 		 */
62 		if (!inprogress && sync_chip) {
63 			/*
64 			 * Ignore the return code. inprogress is only updated
65 			 * when the chip supports it.
66 			 */
67 			__irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
68 						&inprogress);
69 		}
70 		raw_spin_unlock_irqrestore(&desc->lock, flags);
71 
72 		/* Oops, that failed? */
73 	} while (inprogress);
74 }
75 
76 /**
77  *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
78  *	@irq: interrupt number to wait for
79  *
80  *	This function waits for any pending hard IRQ handlers for this
81  *	interrupt to complete before returning. If you use this
82  *	function while holding a resource the IRQ handler may need you
83  *	will deadlock. It does not take associated threaded handlers
84  *	into account.
85  *
86  *	Do not use this for shutdown scenarios where you must be sure
87  *	that all parts (hardirq and threaded handler) have completed.
88  *
89  *	Returns: false if a threaded handler is active.
90  *
91  *	This function may be called - with care - from IRQ context.
92  *
93  *	It does not check whether there is an interrupt in flight at the
94  *	hardware level, but not serviced yet, as this might deadlock when
95  *	called with interrupts disabled and the target CPU of the interrupt
96  *	is the current CPU.
97  */
98 bool synchronize_hardirq(unsigned int irq)
99 {
100 	struct irq_desc *desc = irq_to_desc(irq);
101 
102 	if (desc) {
103 		__synchronize_hardirq(desc, false);
104 		return !atomic_read(&desc->threads_active);
105 	}
106 
107 	return true;
108 }
109 EXPORT_SYMBOL(synchronize_hardirq);
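
/*
 * Illustrative sketch (editor's addition, not part of this file): using the
 * return value of synchronize_hardirq() when quiescing only the hard handler
 * from a context that cannot sleep. my_dev, its register layout and
 * MY_DEV_IRQ_ENABLE are hypothetical; if a threaded handler is still active
 * the caller has to fall back to a sleeping path (synchronize_irq()) later.
 */
#if 0
static bool my_dev_quiesce_hardirq(struct my_dev *dev)
{
	/* Stop the hardware from raising new interrupts. */
	writel(0, dev->regs + MY_DEV_IRQ_ENABLE);

	/* Wait for hard handlers only; threaded handlers may still run. */
	return synchronize_hardirq(dev->irq);
}
#endif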
110 
111 /**
112  *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
113  *	@irq: interrupt number to wait for
114  *
115  *	This function waits for any pending IRQ handlers for this interrupt
116  *	to complete before returning. If you use this function while
117  *	holding a resource the IRQ handler may need you will deadlock.
118  *
119  *	Can only be called from preemptible code as it might sleep when
120  *	an interrupt thread is associated to @irq.
121  *
122  *	It optionally makes sure (when the irq chip supports that method)
123  *	that the interrupt is not pending in any CPU and waiting for
124  *	service.
125  */
126 void synchronize_irq(unsigned int irq)
127 {
128 	struct irq_desc *desc = irq_to_desc(irq);
129 
130 	if (desc) {
131 		__synchronize_hardirq(desc, true);
132 		/*
133 		 * We made sure that no hardirq handler is
134 		 * running. Now verify that no threaded handlers are
135 		 * active.
136 		 */
137 		wait_event(desc->wait_for_threads,
138 			   !atomic_read(&desc->threads_active));
139 	}
140 }
141 EXPORT_SYMBOL(synchronize_irq);
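
/*
 * Illustrative sketch (editor's addition, not part of this file): a typical
 * teardown pattern around synchronize_irq(). The my_dev structure and the
 * MY_DEV_IRQ_ENABLE register are hypothetical.
 */
#if 0
static void my_dev_stop(struct my_dev *dev)
{
	/* Tell the hardware to stop raising interrupts. */
	writel(0, dev->regs + MY_DEV_IRQ_ENABLE);

	/*
	 * Wait for handlers already running on other CPUs, including the
	 * associated irq thread, before tearing down state they might
	 * still touch. Must run in preemptible context and must not hold
	 * any lock the handler takes.
	 */
	synchronize_irq(dev->irq);

	/* Now it is safe to free buffers the handler used. */
}
#endif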
142 
143 #ifdef CONFIG_SMP
144 cpumask_var_t irq_default_affinity;
145 
146 static bool __irq_can_set_affinity(struct irq_desc *desc)
147 {
148 	if (!desc || !irqd_can_balance(&desc->irq_data) ||
149 	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
150 		return false;
151 	return true;
152 }
153 
154 /**
155  *	irq_can_set_affinity - Check if the affinity of a given irq can be set
156  *	@irq:		Interrupt to check
157  *
158  */
159 int irq_can_set_affinity(unsigned int irq)
160 {
161 	return __irq_can_set_affinity(irq_to_desc(irq));
162 }
163 
164 /**
165  * irq_can_set_affinity_usr - Check if affinity of a irq can be set from user space
166  * @irq:	Interrupt to check
167  *
168  * Like irq_can_set_affinity() above, but additionally checks for the
169  * AFFINITY_MANAGED flag.
170  */
171 bool irq_can_set_affinity_usr(unsigned int irq)
172 {
173 	struct irq_desc *desc = irq_to_desc(irq);
174 
175 	return __irq_can_set_affinity(desc) &&
176 		!irqd_affinity_is_managed(&desc->irq_data);
177 }
178 
179 /**
180  *	irq_set_thread_affinity - Notify irq threads to adjust affinity
181  *	@desc:		irq descriptor which has affinity changed
182  *
183  *	We just set IRQTF_AFFINITY and delegate the affinity setting
184  *	to the interrupt thread itself. We can not call
185  *	set_cpus_allowed_ptr() here as we hold desc->lock and this
186  *	code can be called from hard interrupt context.
187  */
188 void irq_set_thread_affinity(struct irq_desc *desc)
189 {
190 	struct irqaction *action;
191 
192 	for_each_action_of_desc(desc, action)
193 		if (action->thread)
194 			set_bit(IRQTF_AFFINITY, &action->thread_flags);
195 }
196 
197 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
198 static void irq_validate_effective_affinity(struct irq_data *data)
199 {
200 	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
201 	struct irq_chip *chip = irq_data_get_irq_chip(data);
202 
203 	if (!cpumask_empty(m))
204 		return;
205 	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
206 		     chip->name, data->irq);
207 }
208 
209 static inline void irq_init_effective_affinity(struct irq_data *data,
210 					       const struct cpumask *mask)
211 {
212 	cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
213 }
214 #else
215 static inline void irq_validate_effective_affinity(struct irq_data *data) { }
216 static inline void irq_init_effective_affinity(struct irq_data *data,
217 					       const struct cpumask *mask) { }
218 #endif
219 
220 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
221 			bool force)
222 {
223 	struct irq_desc *desc = irq_data_to_desc(data);
224 	struct irq_chip *chip = irq_data_get_irq_chip(data);
225 	int ret;
226 
227 	if (!chip || !chip->irq_set_affinity)
228 		return -EINVAL;
229 
230 	ret = chip->irq_set_affinity(data, mask, force);
231 	switch (ret) {
232 	case IRQ_SET_MASK_OK:
233 	case IRQ_SET_MASK_OK_DONE:
234 		cpumask_copy(desc->irq_common_data.affinity, mask);
235 	case IRQ_SET_MASK_OK_NOCOPY:
236 		irq_validate_effective_affinity(data);
237 		irq_set_thread_affinity(desc);
238 		ret = 0;
239 	}
240 
241 	return ret;
242 }
243 
244 #ifdef CONFIG_GENERIC_PENDING_IRQ
245 static inline int irq_set_affinity_pending(struct irq_data *data,
246 					   const struct cpumask *dest)
247 {
248 	struct irq_desc *desc = irq_data_to_desc(data);
249 
250 	irqd_set_move_pending(data);
251 	irq_copy_pending(desc, dest);
252 	return 0;
253 }
254 #else
255 static inline int irq_set_affinity_pending(struct irq_data *data,
256 					   const struct cpumask *dest)
257 {
258 	return -EBUSY;
259 }
260 #endif
261 
262 static int irq_try_set_affinity(struct irq_data *data,
263 				const struct cpumask *dest, bool force)
264 {
265 	int ret = irq_do_set_affinity(data, dest, force);
266 
267 	/*
268 	 * In case that the underlying vector management is busy and the
269 	 * architecture supports the generic pending mechanism then utilize
270 	 * this to avoid returning an error to user space.
271 	 */
272 	if (ret == -EBUSY && !force)
273 		ret = irq_set_affinity_pending(data, dest);
274 	return ret;
275 }
276 
277 static bool irq_set_affinity_deactivated(struct irq_data *data,
278 					 const struct cpumask *mask, bool force)
279 {
280 	struct irq_desc *desc = irq_data_to_desc(data);
281 
282 	/*
283 	 * Handle irq chips which can handle affinity only in activated
284 	 * state correctly
285 	 *
286 	 * If the interrupt is not yet activated, just store the affinity
287 	 * mask and do not call the chip driver at all. On activation the
288 	 * driver has to make sure anyway that the interrupt is in a
289 	 * usable state so startup works.
290 	 */
291 	if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
292 	    irqd_is_activated(data) || !irqd_affinity_on_activate(data))
293 		return false;
294 
295 	cpumask_copy(desc->irq_common_data.affinity, mask);
296 	irq_init_effective_affinity(data, mask);
297 	irqd_set(data, IRQD_AFFINITY_SET);
298 	return true;
299 }
300 
301 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
302 			    bool force)
303 {
304 	struct irq_chip *chip = irq_data_get_irq_chip(data);
305 	struct irq_desc *desc = irq_data_to_desc(data);
306 	int ret = 0;
307 
308 	if (!chip || !chip->irq_set_affinity)
309 		return -EINVAL;
310 
311 	if (irq_set_affinity_deactivated(data, mask, force))
312 		return 0;
313 
314 	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
315 		ret = irq_try_set_affinity(data, mask, force);
316 	} else {
317 		irqd_set_move_pending(data);
318 		irq_copy_pending(desc, mask);
319 	}
320 
321 	if (desc->affinity_notify) {
322 		kref_get(&desc->affinity_notify->kref);
323 		if (!schedule_work(&desc->affinity_notify->work)) {
324 			/* Work was already scheduled, drop our extra ref */
325 			kref_put(&desc->affinity_notify->kref,
326 				 desc->affinity_notify->release);
327 		}
328 	}
329 	irqd_set(data, IRQD_AFFINITY_SET);
330 
331 	return ret;
332 }
333 
334 int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
335 {
336 	struct irq_desc *desc = irq_to_desc(irq);
337 	unsigned long flags;
338 	int ret;
339 
340 	if (!desc)
341 		return -EINVAL;
342 
343 	raw_spin_lock_irqsave(&desc->lock, flags);
344 	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
345 	raw_spin_unlock_irqrestore(&desc->lock, flags);
346 	return ret;
347 }
348 
349 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
350 {
351 	unsigned long flags;
352 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
353 
354 	if (!desc)
355 		return -EINVAL;
356 	desc->affinity_hint = m;
357 	irq_put_desc_unlock(desc, flags);
358 	/* set the initial affinity to prevent every interrupt being on CPU0 */
359 	if (m)
360 		__irq_set_affinity(irq, m, false);
361 	return 0;
362 }
363 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
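
/*
 * Illustrative sketch (editor's addition, not part of this file): spreading
 * per-queue vectors with irq_set_affinity_hint(). The my_dev structure and
 * its vector layout are hypothetical; the hint must be cleared (NULL) again
 * before the interrupt is freed.
 */
#if 0
static void my_dev_hint_affinity(struct my_dev *dev)
{
	int i;

	for (i = 0; i < dev->num_queues; i++) {
		int cpu = cpumask_local_spread(i, dev_to_node(&dev->pdev->dev));

		irq_set_affinity_hint(dev->queue_irq[i], cpumask_of(cpu));
	}
}

static void my_dev_clear_affinity_hints(struct my_dev *dev)
{
	int i;

	for (i = 0; i < dev->num_queues; i++)
		irq_set_affinity_hint(dev->queue_irq[i], NULL);
}
#endif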
364 
365 static void irq_affinity_notify(struct work_struct *work)
366 {
367 	struct irq_affinity_notify *notify =
368 		container_of(work, struct irq_affinity_notify, work);
369 	struct irq_desc *desc = irq_to_desc(notify->irq);
370 	cpumask_var_t cpumask;
371 	unsigned long flags;
372 
373 	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
374 		goto out;
375 
376 	raw_spin_lock_irqsave(&desc->lock, flags);
377 	if (irq_move_pending(&desc->irq_data))
378 		irq_get_pending(cpumask, desc);
379 	else
380 		cpumask_copy(cpumask, desc->irq_common_data.affinity);
381 	raw_spin_unlock_irqrestore(&desc->lock, flags);
382 
383 	notify->notify(notify, cpumask);
384 
385 	free_cpumask_var(cpumask);
386 out:
387 	kref_put(&notify->kref, notify->release);
388 }
389 
390 /**
391  *	irq_set_affinity_notifier - control notification of IRQ affinity changes
392  *	@irq:		Interrupt for which to enable/disable notification
393  *	@notify:	Context for notification, or %NULL to disable
394  *			notification.  Function pointers must be initialised;
395  *			the other fields will be initialised by this function.
396  *
397  *	Must be called in process context.  Notification may only be enabled
398  *	after the IRQ is allocated and must be disabled before the IRQ is
399  *	freed using free_irq().
400  */
401 int
402 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
403 {
404 	struct irq_desc *desc = irq_to_desc(irq);
405 	struct irq_affinity_notify *old_notify;
406 	unsigned long flags;
407 
408 	/* The release function is promised process context */
409 	might_sleep();
410 
411 	if (!desc)
412 		return -EINVAL;
413 
414 	/* Complete initialisation of *notify */
415 	if (notify) {
416 		notify->irq = irq;
417 		kref_init(&notify->kref);
418 		INIT_WORK(&notify->work, irq_affinity_notify);
419 	}
420 
421 	raw_spin_lock_irqsave(&desc->lock, flags);
422 	old_notify = desc->affinity_notify;
423 	desc->affinity_notify = notify;
424 	raw_spin_unlock_irqrestore(&desc->lock, flags);
425 
426 	if (old_notify) {
427 		if (cancel_work_sync(&old_notify->work)) {
428 			/* Pending work had a ref, put that one too */
429 			kref_put(&old_notify->kref, old_notify->release);
430 		}
431 		kref_put(&old_notify->kref, old_notify->release);
432 	}
433 
434 	return 0;
435 }
436 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
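
/*
 * Illustrative sketch (editor's addition, not part of this file): embedding
 * an irq_affinity_notify in a hypothetical per-device structure. Only the
 * .notify and .release callbacks need to be filled in; the remaining fields
 * are initialised by irq_set_affinity_notifier() itself.
 */
#if 0
struct my_dev {
	unsigned int irq;
	struct irq_affinity_notify affinity_notify;
};

static void my_dev_affinity_changed(struct irq_affinity_notify *notify,
				    const cpumask_t *mask)
{
	struct my_dev *dev = container_of(notify, struct my_dev,
					  affinity_notify);

	/* e.g. re-steer per-CPU resources of @dev towards @mask */
}

static void my_dev_affinity_release(struct kref *ref)
{
	/* Last reference dropped; nothing dynamic to free in this sketch. */
}

static int my_dev_setup_notifier(struct my_dev *dev)
{
	dev->affinity_notify.notify = my_dev_affinity_changed;
	dev->affinity_notify.release = my_dev_affinity_release;
	return irq_set_affinity_notifier(dev->irq, &dev->affinity_notify);
}
#endif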
437 
438 #ifndef CONFIG_AUTO_IRQ_AFFINITY
439 /*
440  * Generic version of the affinity autoselector.
441  */
442 int irq_setup_affinity(struct irq_desc *desc)
443 {
444 	struct cpumask *set = irq_default_affinity;
445 	int ret, node = irq_desc_get_node(desc);
446 	static DEFINE_RAW_SPINLOCK(mask_lock);
447 	static struct cpumask mask;
448 
449 	/* Excludes PER_CPU and NO_BALANCE interrupts */
450 	if (!__irq_can_set_affinity(desc))
451 		return 0;
452 
453 	raw_spin_lock(&mask_lock);
454 	/*
455 	 * Preserve the managed affinity setting and a userspace affinity
456 	 * setup, but make sure that one of the targets is online.
457 	 */
458 	if (irqd_affinity_is_managed(&desc->irq_data) ||
459 	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
460 		if (cpumask_intersects(desc->irq_common_data.affinity,
461 				       cpu_online_mask))
462 			set = desc->irq_common_data.affinity;
463 		else
464 			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
465 	}
466 
467 	cpumask_and(&mask, cpu_online_mask, set);
468 	if (cpumask_empty(&mask))
469 		cpumask_copy(&mask, cpu_online_mask);
470 
471 	if (node != NUMA_NO_NODE) {
472 		const struct cpumask *nodemask = cpumask_of_node(node);
473 
474 		/* make sure at least one of the cpus in nodemask is online */
475 		if (cpumask_intersects(&mask, nodemask))
476 			cpumask_and(&mask, &mask, nodemask);
477 	}
478 	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
479 	raw_spin_unlock(&mask_lock);
480 	return ret;
481 }
482 #else
483 /* Wrapper for ALPHA specific affinity selector magic */
484 int irq_setup_affinity(struct irq_desc *desc)
485 {
486 	return irq_select_affinity(irq_desc_get_irq(desc));
487 }
488 #endif /* CONFIG_AUTO_IRQ_AFFINITY */
489 #endif /* CONFIG_SMP */
490 
491 
492 /**
493  *	irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
494  *	@irq: interrupt number to set affinity
495  *	@vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
496  *	            specific data for percpu_devid interrupts
497  *
498  *	This function uses the vCPU specific data to set the vCPU
499  *	affinity for an irq. The vCPU specific data is passed from
500  *	outside, such as KVM. One example code path is as below:
501  *	KVM -> IOMMU -> irq_set_vcpu_affinity().
502  */
503 int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
504 {
505 	unsigned long flags;
506 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
507 	struct irq_data *data;
508 	struct irq_chip *chip;
509 	int ret = -ENOSYS;
510 
511 	if (!desc)
512 		return -EINVAL;
513 
514 	data = irq_desc_get_irq_data(desc);
515 	do {
516 		chip = irq_data_get_irq_chip(data);
517 		if (chip && chip->irq_set_vcpu_affinity)
518 			break;
519 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
520 		data = data->parent_data;
521 #else
522 		data = NULL;
523 #endif
524 	} while (data);
525 
526 	if (data)
527 		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
528 	irq_put_desc_unlock(desc, flags);
529 
530 	return ret;
531 }
532 EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
533 
534 void __disable_irq(struct irq_desc *desc)
535 {
536 	if (!desc->depth++)
537 		irq_disable(desc);
538 }
539 
540 static int __disable_irq_nosync(unsigned int irq)
541 {
542 	unsigned long flags;
543 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
544 
545 	if (!desc)
546 		return -EINVAL;
547 	__disable_irq(desc);
548 	irq_put_desc_busunlock(desc, flags);
549 	return 0;
550 }
551 
552 /**
553  *	disable_irq_nosync - disable an irq without waiting
554  *	@irq: Interrupt to disable
555  *
556  *	Disable the selected interrupt line.  Disables and Enables are
557  *	nested.
558  *	Unlike disable_irq(), this function does not ensure existing
559  *	instances of the IRQ handler have completed before returning.
560  *
561  *	This function may be called from IRQ context.
562  */
563 void disable_irq_nosync(unsigned int irq)
564 {
565 	__disable_irq_nosync(irq);
566 }
567 EXPORT_SYMBOL(disable_irq_nosync);
568 
569 /**
570  *	disable_irq - disable an irq and wait for completion
571  *	@irq: Interrupt to disable
572  *
573  *	Disable the selected interrupt line.  Enables and Disables are
574  *	nested.
575  *	This function waits for any pending IRQ handlers for this interrupt
576  *	to complete before returning. If you use this function while
577  *	holding a resource the IRQ handler may need you will deadlock.
578  *
579  *	This function may be called - with care - from IRQ context.
580  */
581 void disable_irq(unsigned int irq)
582 {
583 	if (!__disable_irq_nosync(irq))
584 		synchronize_irq(irq);
585 }
586 EXPORT_SYMBOL(disable_irq);
587 
588 /**
589  *	disable_hardirq - disables an irq and waits for hardirq completion
590  *	@irq: Interrupt to disable
591  *
592  *	Disable the selected interrupt line.  Enables and Disables are
593  *	nested.
594  *	This function waits for any pending hard IRQ handlers for this
595  *	interrupt to complete before returning. If you use this function while
596  *	holding a resource the hard IRQ handler may need you will deadlock.
597  *
598  *	When used to optimistically disable an interrupt from atomic context
599  *	the return value must be checked.
600  *
601  *	Returns: false if a threaded handler is active.
602  *
603  *	This function may be called - with care - from IRQ context.
604  */
605 bool disable_hardirq(unsigned int irq)
606 {
607 	if (!__disable_irq_nosync(irq))
608 		return synchronize_hardirq(irq);
609 
610 	return false;
611 }
612 EXPORT_SYMBOL_GPL(disable_hardirq);
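
/*
 * Illustrative sketch (editor's addition, not part of this file): optimistic
 * use of disable_hardirq() from atomic context, similar in spirit to what
 * netpoll-style callers do. Whether the handler may be invoked directly here
 * depends entirely on the driver; my_dev and my_dev_interrupt are
 * hypothetical.
 */
#if 0
static void my_dev_poll_irq(struct my_dev *dev)
{
	/* Returns false if a threaded handler is still active. */
	if (disable_hardirq(dev->irq))
		my_dev_interrupt(dev->irq, dev);

	/* The disable above nests, so it must be balanced unconditionally. */
	enable_irq(dev->irq);
}
#endif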
613 
614 void __enable_irq(struct irq_desc *desc)
615 {
616 	switch (desc->depth) {
617 	case 0:
618  err_out:
619 		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
620 		     irq_desc_get_irq(desc));
621 		break;
622 	case 1: {
623 		if (desc->istate & IRQS_SUSPENDED)
624 			goto err_out;
625 		/* Prevent probing on this irq: */
626 		irq_settings_set_noprobe(desc);
627 		/*
628 		 * Call irq_startup() not irq_enable() here because the
629 		 * interrupt might be marked NOAUTOEN. So irq_startup()
630 		 * needs to be invoked when it gets enabled the first
631 		 * time. If it was already started up, then irq_startup()
632 		 * will invoke irq_enable() under the hood.
633 		 */
634 		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
635 		break;
636 	}
637 	default:
638 		desc->depth--;
639 	}
640 }
641 
642 /**
643  *	enable_irq - enable handling of an irq
644  *	@irq: Interrupt to enable
645  *
646  *	Undoes the effect of one call to disable_irq().  If this
647  *	matches the last disable, processing of interrupts on this
648  *	IRQ line is re-enabled.
649  *
650  *	This function may be called from IRQ context only when
651  *	desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
652  */
653 void enable_irq(unsigned int irq)
654 {
655 	unsigned long flags;
656 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
657 
658 	if (!desc)
659 		return;
660 	if (WARN(!desc->irq_data.chip,
661 		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
662 		goto out;
663 
664 	__enable_irq(desc);
665 out:
666 	irq_put_desc_busunlock(desc, flags);
667 }
668 EXPORT_SYMBOL(enable_irq);
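
/*
 * Illustrative sketch (editor's addition, not part of this file): a driver
 * quiescing its interrupt while reprogramming the hardware. my_dev and the
 * MY_DEV_CONFIG register are hypothetical; disable_irq() can sleep, so this
 * must run in process context.
 */
#if 0
static void my_dev_reconfigure(struct my_dev *dev)
{
	disable_irq(dev->irq);		/* waits for running handlers */

	writel(dev->new_config, dev->regs + MY_DEV_CONFIG);

	enable_irq(dev->irq);		/* balances the disable above */
}
#endif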
669 
670 static int set_irq_wake_real(unsigned int irq, unsigned int on)
671 {
672 	struct irq_desc *desc = irq_to_desc(irq);
673 	int ret = -ENXIO;
674 
675 	if (irq_desc_get_chip(desc)->flags &  IRQCHIP_SKIP_SET_WAKE)
676 		return 0;
677 
678 	if (desc->irq_data.chip->irq_set_wake)
679 		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
680 
681 	return ret;
682 }
683 
684 /**
685  *	irq_set_irq_wake - control irq power management wakeup
686  *	@irq:	interrupt to control
687  *	@on:	enable/disable power management wakeup
688  *
689  *	Enable/disable power management wakeup mode, which is
690  *	disabled by default.  Enables and disables must match,
691  *	just as they match for non-wakeup mode support.
692  *
693  *	Wakeup mode lets this IRQ wake the system from sleep
694  *	states like "suspend to RAM".
695  */
696 int irq_set_irq_wake(unsigned int irq, unsigned int on)
697 {
698 	unsigned long flags;
699 	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
700 	int ret = 0;
701 
702 	if (!desc)
703 		return -EINVAL;
704 
705 	/* wakeup-capable irqs can be shared between drivers that
706 	 * don't need to have the same sleep mode behaviors.
707 	 */
708 	if (on) {
709 		if (desc->wake_depth++ == 0) {
710 			ret = set_irq_wake_real(irq, on);
711 			if (ret)
712 				desc->wake_depth = 0;
713 			else
714 				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
715 		}
716 	} else {
717 		if (desc->wake_depth == 0) {
718 			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
719 		} else if (--desc->wake_depth == 0) {
720 			ret = set_irq_wake_real(irq, on);
721 			if (ret)
722 				desc->wake_depth = 1;
723 			else
724 				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
725 		}
726 	}
727 	irq_put_desc_busunlock(desc, flags);
728 	return ret;
729 }
730 EXPORT_SYMBOL(irq_set_irq_wake);
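
/*
 * Illustrative sketch (editor's addition, not part of this file): arming an
 * interrupt as a wakeup source across suspend. device_may_wakeup() comes
 * from the PM core; the my_dev structure is hypothetical, and the enable and
 * disable calls must stay balanced.
 */
#if 0
static int my_dev_suspend(struct device *d)
{
	struct my_dev *dev = dev_get_drvdata(d);

	if (device_may_wakeup(d))
		irq_set_irq_wake(dev->irq, 1);
	return 0;
}

static int my_dev_resume(struct device *d)
{
	struct my_dev *dev = dev_get_drvdata(d);

	if (device_may_wakeup(d))
		irq_set_irq_wake(dev->irq, 0);
	return 0;
}
#endif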
731 
732 /*
733  * Internal function that tells the architecture code whether a
734  * particular irq has been exclusively allocated or is available
735  * for driver use.
736  */
737 int can_request_irq(unsigned int irq, unsigned long irqflags)
738 {
739 	unsigned long flags;
740 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
741 	int canrequest = 0;
742 
743 	if (!desc)
744 		return 0;
745 
746 	if (irq_settings_can_request(desc)) {
747 		if (!desc->action ||
748 		    irqflags & desc->action->flags & IRQF_SHARED)
749 			canrequest = 1;
750 	}
751 	irq_put_desc_unlock(desc, flags);
752 	return canrequest;
753 }
754 
755 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
756 {
757 	struct irq_chip *chip = desc->irq_data.chip;
758 	int ret, unmask = 0;
759 
760 	if (!chip || !chip->irq_set_type) {
761 		/*
762 		 * IRQF_TRIGGER_* but the PIC does not support multiple
763 		 * flow-types?
764 		 */
765 		pr_debug("No set_type function for IRQ %d (%s)\n",
766 			 irq_desc_get_irq(desc),
767 			 chip ? (chip->name ? : "unknown") : "unknown");
768 		return 0;
769 	}
770 
771 	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
772 		if (!irqd_irq_masked(&desc->irq_data))
773 			mask_irq(desc);
774 		if (!irqd_irq_disabled(&desc->irq_data))
775 			unmask = 1;
776 	}
777 
778 	/* Mask all flags except trigger mode */
779 	flags &= IRQ_TYPE_SENSE_MASK;
780 	ret = chip->irq_set_type(&desc->irq_data, flags);
781 
782 	switch (ret) {
783 	case IRQ_SET_MASK_OK:
784 	case IRQ_SET_MASK_OK_DONE:
785 		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
786 		irqd_set(&desc->irq_data, flags);
787 
788 	case IRQ_SET_MASK_OK_NOCOPY:
789 		flags = irqd_get_trigger_type(&desc->irq_data);
790 		irq_settings_set_trigger_mask(desc, flags);
791 		irqd_clear(&desc->irq_data, IRQD_LEVEL);
792 		irq_settings_clr_level(desc);
793 		if (flags & IRQ_TYPE_LEVEL_MASK) {
794 			irq_settings_set_level(desc);
795 			irqd_set(&desc->irq_data, IRQD_LEVEL);
796 		}
797 
798 		ret = 0;
799 		break;
800 	default:
801 		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
802 		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
803 	}
804 	if (unmask)
805 		unmask_irq(desc);
806 	return ret;
807 }
808 
809 #ifdef CONFIG_HARDIRQS_SW_RESEND
810 int irq_set_parent(int irq, int parent_irq)
811 {
812 	unsigned long flags;
813 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
814 
815 	if (!desc)
816 		return -EINVAL;
817 
818 	desc->parent_irq = parent_irq;
819 
820 	irq_put_desc_unlock(desc, flags);
821 	return 0;
822 }
823 EXPORT_SYMBOL_GPL(irq_set_parent);
824 #endif
825 
826 /*
827  * Default primary interrupt handler for threaded interrupts. Is
828  * assigned as primary handler when request_threaded_irq is called
829  * with handler == NULL. Useful for oneshot interrupts.
830  */
831 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
832 {
833 	return IRQ_WAKE_THREAD;
834 }
835 
836 /*
837  * Primary handler for nested threaded interrupts. Should never be
838  * called.
839  */
840 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
841 {
842 	WARN(1, "Primary handler called for nested irq %d\n", irq);
843 	return IRQ_NONE;
844 }
845 
846 static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
847 {
848 	WARN(1, "Secondary action handler called for irq %d\n", irq);
849 	return IRQ_NONE;
850 }
851 
852 static int irq_wait_for_interrupt(struct irqaction *action)
853 {
854 	for (;;) {
855 		set_current_state(TASK_INTERRUPTIBLE);
856 
857 		if (kthread_should_stop()) {
858 			/* may need to run one last time */
859 			if (test_and_clear_bit(IRQTF_RUNTHREAD,
860 					       &action->thread_flags)) {
861 				__set_current_state(TASK_RUNNING);
862 				return 0;
863 			}
864 			__set_current_state(TASK_RUNNING);
865 			return -1;
866 		}
867 
868 		if (test_and_clear_bit(IRQTF_RUNTHREAD,
869 				       &action->thread_flags)) {
870 			__set_current_state(TASK_RUNNING);
871 			return 0;
872 		}
873 		schedule();
874 	}
875 }
876 
877 /*
878  * Oneshot interrupts keep the irq line masked until the threaded
879  * handler has finished. Unmask if the interrupt has not been disabled and
880  * is marked MASKED.
881  */
882 static void irq_finalize_oneshot(struct irq_desc *desc,
883 				 struct irqaction *action)
884 {
885 	if (!(desc->istate & IRQS_ONESHOT) ||
886 	    action->handler == irq_forced_secondary_handler)
887 		return;
888 again:
889 	chip_bus_lock(desc);
890 	raw_spin_lock_irq(&desc->lock);
891 
892 	/*
893 	 * Implausible though it may be, we need to protect ourselves against
894 	 * the following scenario:
895 	 *
896 	 * The thread can finish before the hard interrupt handler
897 	 * on the other CPU. If we unmask the irq line then the
898 	 * interrupt can come in again, mask the line and leave due
899 	 * to IRQS_INPROGRESS, and the irq line stays masked forever.
900 	 *
901 	 * This also serializes the state of shared oneshot handlers
902 	 * versus "desc->threads_oneshot |= action->thread_mask;" in
903 	 * irq_wake_thread(). See the comment there which explains the
904 	 * serialization.
905 	 */
906 	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
907 		raw_spin_unlock_irq(&desc->lock);
908 		chip_bus_sync_unlock(desc);
909 		cpu_relax();
910 		goto again;
911 	}
912 
913 	/*
914 	 * Now check again, whether the thread should run. Otherwise
915 	 * we would clear the threads_oneshot bit of this thread which
916 	 * was just set.
917 	 */
918 	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
919 		goto out_unlock;
920 
921 	desc->threads_oneshot &= ~action->thread_mask;
922 
923 	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
924 	    irqd_irq_masked(&desc->irq_data))
925 		unmask_threaded_irq(desc);
926 
927 out_unlock:
928 	raw_spin_unlock_irq(&desc->lock);
929 	chip_bus_sync_unlock(desc);
930 }
931 
932 #ifdef CONFIG_SMP
933 /*
934  * Check whether we need to change the affinity of the interrupt thread.
935  */
936 static void
937 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
938 {
939 	cpumask_var_t mask;
940 	bool valid = true;
941 
942 	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
943 		return;
944 
945 	/*
946 	 * In case we are out of memory we set IRQTF_AFFINITY again and
947 	 * try again next time
948 	 */
949 	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
950 		set_bit(IRQTF_AFFINITY, &action->thread_flags);
951 		return;
952 	}
953 
954 	raw_spin_lock_irq(&desc->lock);
955 	/*
956 	 * This code is triggered unconditionally. Check the affinity
957 	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
958 	 */
959 	if (cpumask_available(desc->irq_common_data.affinity)) {
960 		const struct cpumask *m;
961 
962 		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
963 		cpumask_copy(mask, m);
964 	} else {
965 		valid = false;
966 	}
967 	raw_spin_unlock_irq(&desc->lock);
968 
969 	if (valid)
970 		set_cpus_allowed_ptr(current, mask);
971 	free_cpumask_var(mask);
972 }
973 #else
974 static inline void
975 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
976 #endif
977 
978 /*
979  * Interrupts which are not explicitly requested as threaded
980  * interrupts rely on the implicit bh/preempt disable of the hard irq
981  * context. So we need to disable bh here to avoid deadlocks and other
982  * side effects.
983  */
984 static irqreturn_t
985 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
986 {
987 	irqreturn_t ret;
988 
989 	local_bh_disable();
990 	ret = action->thread_fn(action->irq, action->dev_id);
991 	if (ret == IRQ_HANDLED)
992 		atomic_inc(&desc->threads_handled);
993 
994 	irq_finalize_oneshot(desc, action);
995 	local_bh_enable();
996 	return ret;
997 }
998 
999 /*
1000  * Interrupts explicitly requested as threaded interrupts want to be
1001  * preemptible - many of them need to sleep and wait for slow buses to
1002  * complete.
1003  */
1004 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
1005 		struct irqaction *action)
1006 {
1007 	irqreturn_t ret;
1008 
1009 	ret = action->thread_fn(action->irq, action->dev_id);
1010 	if (ret == IRQ_HANDLED)
1011 		atomic_inc(&desc->threads_handled);
1012 
1013 	irq_finalize_oneshot(desc, action);
1014 	return ret;
1015 }
1016 
1017 static void wake_threads_waitq(struct irq_desc *desc)
1018 {
1019 	if (atomic_dec_and_test(&desc->threads_active))
1020 		wake_up(&desc->wait_for_threads);
1021 }
1022 
1023 static void irq_thread_dtor(struct callback_head *unused)
1024 {
1025 	struct task_struct *tsk = current;
1026 	struct irq_desc *desc;
1027 	struct irqaction *action;
1028 
1029 	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
1030 		return;
1031 
1032 	action = kthread_data(tsk);
1033 
1034 	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
1035 	       tsk->comm, tsk->pid, action->irq);
1036 
1037 
1038 	desc = irq_to_desc(action->irq);
1039 	/*
1040 	 * If IRQTF_RUNTHREAD is set, we need to decrement
1041 	 * desc->threads_active and wake possible waiters.
1042 	 */
1043 	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1044 		wake_threads_waitq(desc);
1045 
1046 	/* Prevent a stale desc->threads_oneshot */
1047 	irq_finalize_oneshot(desc, action);
1048 }
1049 
1050 static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
1051 {
1052 	struct irqaction *secondary = action->secondary;
1053 
1054 	if (WARN_ON_ONCE(!secondary))
1055 		return;
1056 
1057 	raw_spin_lock_irq(&desc->lock);
1058 	__irq_wake_thread(desc, secondary);
1059 	raw_spin_unlock_irq(&desc->lock);
1060 }
1061 
1062 /*
1063  * Interrupt handler thread
1064  */
1065 static int irq_thread(void *data)
1066 {
1067 	struct callback_head on_exit_work;
1068 	struct irqaction *action = data;
1069 	struct irq_desc *desc = irq_to_desc(action->irq);
1070 	irqreturn_t (*handler_fn)(struct irq_desc *desc,
1071 			struct irqaction *action);
1072 
1073 	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
1074 					&action->thread_flags))
1075 		handler_fn = irq_forced_thread_fn;
1076 	else
1077 		handler_fn = irq_thread_fn;
1078 
1079 	init_task_work(&on_exit_work, irq_thread_dtor);
1080 	task_work_add(current, &on_exit_work, false);
1081 
1082 	irq_thread_check_affinity(desc, action);
1083 
1084 	while (!irq_wait_for_interrupt(action)) {
1085 		irqreturn_t action_ret;
1086 
1087 		irq_thread_check_affinity(desc, action);
1088 
1089 		action_ret = handler_fn(desc, action);
1090 		if (action_ret == IRQ_WAKE_THREAD)
1091 			irq_wake_secondary(desc, action);
1092 
1093 		wake_threads_waitq(desc);
1094 	}
1095 
1096 	/*
1097 	 * This is the regular exit path. __free_irq() is stopping the
1098 	 * thread via kthread_stop() after calling
1099 	 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
1100 	 * oneshot mask bit can be set.
1101 	 */
1102 	task_work_cancel(current, irq_thread_dtor);
1103 	return 0;
1104 }
1105 
1106 /**
1107  *	irq_wake_thread - wake the irq thread for the action identified by dev_id
1108  *	@irq:		Interrupt line
1109  *	@dev_id:	Device identity for which the thread should be woken
1110  *
1111  */
1112 void irq_wake_thread(unsigned int irq, void *dev_id)
1113 {
1114 	struct irq_desc *desc = irq_to_desc(irq);
1115 	struct irqaction *action;
1116 	unsigned long flags;
1117 
1118 	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1119 		return;
1120 
1121 	raw_spin_lock_irqsave(&desc->lock, flags);
1122 	for_each_action_of_desc(desc, action) {
1123 		if (action->dev_id == dev_id) {
1124 			if (action->thread)
1125 				__irq_wake_thread(desc, action);
1126 			break;
1127 		}
1128 	}
1129 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1130 }
1131 EXPORT_SYMBOL_GPL(irq_wake_thread);
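
/*
 * Illustrative sketch (editor's addition, not part of this file): kicking
 * the threaded handler without a hardware interrupt, here from a
 * hypothetical recovery timer. @dev must be the same dev_id that was passed
 * to request_threaded_irq(); my_dev and recovery_timer are made up.
 */
#if 0
static void my_dev_recovery_timer(struct timer_list *t)
{
	struct my_dev *dev = from_timer(dev, t, recovery_timer);

	/* Run the threaded handler as if the interrupt had fired. */
	irq_wake_thread(dev->irq, dev);
}
#endif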
1132 
1133 static int irq_setup_forced_threading(struct irqaction *new)
1134 {
1135 	if (!force_irqthreads)
1136 		return 0;
1137 	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1138 		return 0;
1139 
1140 	/*
1141 	 * No further action required for interrupts which are requested as
1142 	 * threaded interrupts already
1143 	 */
1144 	if (new->handler == irq_default_primary_handler)
1145 		return 0;
1146 
1147 	new->flags |= IRQF_ONESHOT;
1148 
1149 	/*
1150 	 * Handle the case where we have a real primary handler and a
1151 	 * thread handler. We force thread them as well by creating a
1152 	 * secondary action.
1153 	 */
1154 	if (new->handler && new->thread_fn) {
1155 		/* Allocate the secondary action */
1156 		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1157 		if (!new->secondary)
1158 			return -ENOMEM;
1159 		new->secondary->handler = irq_forced_secondary_handler;
1160 		new->secondary->thread_fn = new->thread_fn;
1161 		new->secondary->dev_id = new->dev_id;
1162 		new->secondary->irq = new->irq;
1163 		new->secondary->name = new->name;
1164 	}
1165 	/* Deal with the primary handler */
1166 	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1167 	new->thread_fn = new->handler;
1168 	new->handler = irq_default_primary_handler;
1169 	return 0;
1170 }
1171 
1172 static int irq_request_resources(struct irq_desc *desc)
1173 {
1174 	struct irq_data *d = &desc->irq_data;
1175 	struct irq_chip *c = d->chip;
1176 
1177 	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1178 }
1179 
1180 static void irq_release_resources(struct irq_desc *desc)
1181 {
1182 	struct irq_data *d = &desc->irq_data;
1183 	struct irq_chip *c = d->chip;
1184 
1185 	if (c->irq_release_resources)
1186 		c->irq_release_resources(d);
1187 }
1188 
1189 static int
1190 setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1191 {
1192 	struct task_struct *t;
1193 	struct sched_param param = {
1194 		.sched_priority = MAX_USER_RT_PRIO/2,
1195 	};
1196 
1197 	if (!secondary) {
1198 		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1199 				   new->name);
1200 	} else {
1201 		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1202 				   new->name);
1203 		param.sched_priority -= 1;
1204 	}
1205 
1206 	if (IS_ERR(t))
1207 		return PTR_ERR(t);
1208 
1209 	sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
1210 
1211 	/*
1212 	 * We keep the reference to the task struct even if
1213 	 * the thread dies to avoid that the interrupt code
1214 	 * references an already freed task_struct.
1215 	 */
1216 	get_task_struct(t);
1217 	new->thread = t;
1218 	/*
1219 	 * Tell the thread to set its affinity. This is
1220 	 * important for shared interrupt handlers as we do
1221 	 * not invoke setup_affinity() for the secondary
1222 	 * handlers as everything is already set up. Even for
1223 	 * interrupts marked with IRQF_NO_BALANCE this is
1224 	 * correct as we want the thread to move to the cpu(s)
1225 	 * on which the requesting code placed the interrupt.
1226 	 */
1227 	set_bit(IRQTF_AFFINITY, &new->thread_flags);
1228 	return 0;
1229 }
1230 
1231 /*
1232  * Internal function to register an irqaction - typically used to
1233  * allocate special interrupts that are part of the architecture.
1234  *
1235  * Locking rules:
1236  *
1237  * desc->request_mutex	Provides serialization against a concurrent free_irq()
1238  *   chip_bus_lock	Provides serialization for slow bus operations
1239  *     desc->lock	Provides serialization against hard interrupts
1240  *
1241  * chip_bus_lock and desc->lock are sufficient for all other management and
1242  * interrupt related functions. desc->request_mutex solely serializes
1243  * request/free_irq().
1244  */
1245 static int
1246 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1247 {
1248 	struct irqaction *old, **old_ptr;
1249 	unsigned long flags, thread_mask = 0;
1250 	int ret, nested, shared = 0;
1251 
1252 	if (!desc)
1253 		return -EINVAL;
1254 
1255 	if (desc->irq_data.chip == &no_irq_chip)
1256 		return -ENOSYS;
1257 	if (!try_module_get(desc->owner))
1258 		return -ENODEV;
1259 
1260 	new->irq = irq;
1261 
1262 	/*
1263 	 * If the trigger type is not specified by the caller,
1264 	 * then use the default for this interrupt.
1265 	 */
1266 	if (!(new->flags & IRQF_TRIGGER_MASK))
1267 		new->flags |= irqd_get_trigger_type(&desc->irq_data);
1268 
1269 	/*
1270 	 * Check whether the interrupt nests into another interrupt
1271 	 * thread.
1272 	 */
1273 	nested = irq_settings_is_nested_thread(desc);
1274 	if (nested) {
1275 		if (!new->thread_fn) {
1276 			ret = -EINVAL;
1277 			goto out_mput;
1278 		}
1279 		/*
1280 		 * Replace the primary handler which was provided from
1281 		 * the driver for non nested interrupt handling by the
1282 		 * dummy function which warns when called.
1283 		 */
1284 		new->handler = irq_nested_primary_handler;
1285 	} else {
1286 		if (irq_settings_can_thread(desc)) {
1287 			ret = irq_setup_forced_threading(new);
1288 			if (ret)
1289 				goto out_mput;
1290 		}
1291 	}
1292 
1293 	/*
1294 	 * Create a handler thread when a thread function is supplied
1295 	 * and the interrupt does not nest into another interrupt
1296 	 * thread.
1297 	 */
1298 	if (new->thread_fn && !nested) {
1299 		ret = setup_irq_thread(new, irq, false);
1300 		if (ret)
1301 			goto out_mput;
1302 		if (new->secondary) {
1303 			ret = setup_irq_thread(new->secondary, irq, true);
1304 			if (ret)
1305 				goto out_thread;
1306 		}
1307 	}
1308 
1309 	/*
1310 	 * Drivers are often written to work w/o knowledge about the
1311 	 * underlying irq chip implementation, so a request for a
1312 	 * threaded irq without a primary hard irq context handler
1313 	 * requires the ONESHOT flag to be set. Some irq chips like
1314 	 * MSI based interrupts are per se one shot safe. Check the
1315 	 * chip flags, so we can avoid the unmask dance at the end of
1316 	 * the threaded handler for those.
1317 	 */
1318 	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1319 		new->flags &= ~IRQF_ONESHOT;
1320 
1321 	/*
1322 	 * Protects against a concurrent __free_irq() call which might wait
1323 	 * for synchronize_hardirq() to complete without holding the optional
1324 	 * chip bus lock and desc->lock. Also protects against handing out
1325 	 * a recycled oneshot thread_mask bit while it's still in use by
1326 	 * its previous owner.
1327 	 */
1328 	mutex_lock(&desc->request_mutex);
1329 
1330 	/*
1331 	 * Acquire bus lock as the irq_request_resources() callback below
1332 	 * might rely on the serialization or the magic power management
1333 	 * functions which are abusing the irq_bus_lock() callback,
1334 	 */
1335 	chip_bus_lock(desc);
1336 
1337 	/* First installed action requests resources. */
1338 	if (!desc->action) {
1339 		ret = irq_request_resources(desc);
1340 		if (ret) {
1341 			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1342 			       new->name, irq, desc->irq_data.chip->name);
1343 			goto out_bus_unlock;
1344 		}
1345 	}
1346 
1347 	/*
1348 	 * The following block of code has to be executed atomically
1349 	 * protected against a concurrent interrupt and any of the other
1350 	 * management calls which are not serialized via
1351 	 * desc->request_mutex or the optional bus lock.
1352 	 */
1353 	raw_spin_lock_irqsave(&desc->lock, flags);
1354 	old_ptr = &desc->action;
1355 	old = *old_ptr;
1356 	if (old) {
1357 		/*
1358 		 * Can't share interrupts unless both agree to and are
1359 		 * the same type (level, edge, polarity). So both flag
1360 		 * fields must have IRQF_SHARED set and the bits which
1361 		 * set the trigger type must match. Also all must
1362 		 * agree on ONESHOT.
1363 		 */
1364 		unsigned int oldtype;
1365 
1366 		/*
1367 		 * If nobody did set the configuration before, inherit
1368 		 * the one provided by the requester.
1369 		 */
1370 		if (irqd_trigger_type_was_set(&desc->irq_data)) {
1371 			oldtype = irqd_get_trigger_type(&desc->irq_data);
1372 		} else {
1373 			oldtype = new->flags & IRQF_TRIGGER_MASK;
1374 			irqd_set_trigger_type(&desc->irq_data, oldtype);
1375 		}
1376 
1377 		if (!((old->flags & new->flags) & IRQF_SHARED) ||
1378 		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1379 		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
1380 			goto mismatch;
1381 
1382 		/* All handlers must agree on per-cpuness */
1383 		if ((old->flags & IRQF_PERCPU) !=
1384 		    (new->flags & IRQF_PERCPU))
1385 			goto mismatch;
1386 
1387 		/* add new interrupt at end of irq queue */
1388 		do {
1389 			/*
1390 			 * Or all existing action->thread_mask bits,
1391 			 * so we can find the next zero bit for this
1392 			 * new action.
1393 			 */
1394 			thread_mask |= old->thread_mask;
1395 			old_ptr = &old->next;
1396 			old = *old_ptr;
1397 		} while (old);
1398 		shared = 1;
1399 	}
1400 
1401 	/*
1402 	 * Setup the thread mask for this irqaction for ONESHOT. For
1403 	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
1404 	 * conditional in irq_wake_thread().
1405 	 */
1406 	if (new->flags & IRQF_ONESHOT) {
1407 		/*
1408 		 * Unlikely to have 32 resp 64 irqs sharing one line,
1409 		 * but who knows.
1410 		 */
1411 		if (thread_mask == ~0UL) {
1412 			ret = -EBUSY;
1413 			goto out_unlock;
1414 		}
1415 		/*
1416 		 * The thread_mask for the action is or'ed to
1417 		 * desc->thread_active to indicate that the
1418 		 * IRQF_ONESHOT thread handler has been woken, but not
1419 		 * yet finished. The bit is cleared when a thread
1420 		 * completes. When all threads of a shared interrupt
1421 		 * line have completed desc->threads_active becomes
1422 		 * zero and the interrupt line is unmasked. See
1423 		 * handle.c:irq_wake_thread() for further information.
1424 		 *
1425 		 * If no thread is woken by primary (hard irq context)
1426 		 * interrupt handlers, then desc->threads_active is
1427 		 * also checked for zero to unmask the irq line in the
1428 		 * affected hard irq flow handlers
1429 		 * (handle_[fasteoi|level]_irq).
1430 		 *
1431 		 * The new action gets the first zero bit of
1432 		 * thread_mask assigned. See the loop above which or's
1433 		 * all existing action->thread_mask bits.
1434 		 */
1435 		new->thread_mask = 1UL << ffz(thread_mask);
1436 
1437 	} else if (new->handler == irq_default_primary_handler &&
1438 		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1439 		/*
1440 		 * The interrupt was requested with handler = NULL, so
1441 		 * we use the default primary handler for it. But it
1442 		 * does not have the oneshot flag set. In combination
1443 		 * with level interrupts this is deadly, because the
1444 		 * default primary handler just wakes the thread, then
1445 		 * the irq line is reenabled, but the device still
1446 		 * has the level irq asserted. Rinse and repeat....
1447 		 *
1448 		 * While this works for edge type interrupts, we play
1449 		 * it safe and reject unconditionally because we can't
1450 		 * say for sure which type this interrupt really
1451 		 * has. The type flags are unreliable as the
1452 		 * underlying chip implementation can override them.
1453 		 */
1454 		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1455 		       irq);
1456 		ret = -EINVAL;
1457 		goto out_unlock;
1458 	}
1459 
1460 	if (!shared) {
1461 		init_waitqueue_head(&desc->wait_for_threads);
1462 
1463 		/* Setup the type (level, edge polarity) if configured: */
1464 		if (new->flags & IRQF_TRIGGER_MASK) {
1465 			ret = __irq_set_trigger(desc,
1466 						new->flags & IRQF_TRIGGER_MASK);
1467 
1468 			if (ret)
1469 				goto out_unlock;
1470 		}
1471 
1472 		/*
1473 		 * Activate the interrupt. That activation must happen
1474 		 * independently of IRQ_NOAUTOEN. request_irq() can fail
1475 		 * and the callers are supposed to handle
1476 		 * that. enable_irq() of an interrupt requested with
1477 		 * IRQ_NOAUTOEN is not supposed to fail. The activation
1478 		 * keeps it in shutdown mode, it merely associates
1479 		 * resources if necessary and if that's not possible it
1480 		 * fails. Interrupts which are in managed shutdown mode
1481 		 * will simply ignore that activation request.
1482 		 */
1483 		ret = irq_activate(desc);
1484 		if (ret)
1485 			goto out_unlock;
1486 
1487 		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1488 				  IRQS_ONESHOT | IRQS_WAITING);
1489 		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1490 
1491 		if (new->flags & IRQF_PERCPU) {
1492 			irqd_set(&desc->irq_data, IRQD_PER_CPU);
1493 			irq_settings_set_per_cpu(desc);
1494 		}
1495 
1496 		if (new->flags & IRQF_ONESHOT)
1497 			desc->istate |= IRQS_ONESHOT;
1498 
1499 		/* Exclude IRQ from balancing if requested */
1500 		if (new->flags & IRQF_NOBALANCING) {
1501 			irq_settings_set_no_balancing(desc);
1502 			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1503 		}
1504 
1505 		if (irq_settings_can_autoenable(desc)) {
1506 			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1507 		} else {
1508 			/*
1509 			 * Shared interrupts do not go well with disabling
1510 			 * auto enable. The sharing interrupt might request
1511 			 * it while it's still disabled and then wait for
1512 			 * interrupts forever.
1513 			 */
1514 			WARN_ON_ONCE(new->flags & IRQF_SHARED);
1515 			/* Undo nested disables: */
1516 			desc->depth = 1;
1517 		}
1518 
1519 	} else if (new->flags & IRQF_TRIGGER_MASK) {
1520 		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1521 		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1522 
1523 		if (nmsk != omsk)
1524 			/* hope the handler works with the current trigger mode */
1525 			pr_warn("irq %d uses trigger mode %u; requested %u\n",
1526 				irq, omsk, nmsk);
1527 	}
1528 
1529 	*old_ptr = new;
1530 
1531 	irq_pm_install_action(desc, new);
1532 
1533 	/* Reset broken irq detection when installing new handler */
1534 	desc->irq_count = 0;
1535 	desc->irqs_unhandled = 0;
1536 
1537 	/*
1538 	 * Check whether we disabled the irq via the spurious handler
1539 	 * before. Reenable it and give it another chance.
1540 	 */
1541 	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1542 		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1543 		__enable_irq(desc);
1544 	}
1545 
1546 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1547 	chip_bus_sync_unlock(desc);
1548 	mutex_unlock(&desc->request_mutex);
1549 
1550 	irq_setup_timings(desc, new);
1551 
1552 	/*
1553 	 * Strictly no need to wake it up, but hung_task complains
1554 	 * when no hard interrupt wakes the thread up.
1555 	 */
1556 	if (new->thread)
1557 		wake_up_process(new->thread);
1558 	if (new->secondary)
1559 		wake_up_process(new->secondary->thread);
1560 
1561 	register_irq_proc(irq, desc);
1562 	new->dir = NULL;
1563 	register_handler_proc(irq, new);
1564 	return 0;
1565 
1566 mismatch:
1567 	if (!(new->flags & IRQF_PROBE_SHARED)) {
1568 		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1569 		       irq, new->flags, new->name, old->flags, old->name);
1570 #ifdef CONFIG_DEBUG_SHIRQ
1571 		dump_stack();
1572 #endif
1573 	}
1574 	ret = -EBUSY;
1575 
1576 out_unlock:
1577 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1578 
1579 	if (!desc->action)
1580 		irq_release_resources(desc);
1581 out_bus_unlock:
1582 	chip_bus_sync_unlock(desc);
1583 	mutex_unlock(&desc->request_mutex);
1584 
1585 out_thread:
1586 	if (new->thread) {
1587 		struct task_struct *t = new->thread;
1588 
1589 		new->thread = NULL;
1590 		kthread_stop(t);
1591 		put_task_struct(t);
1592 	}
1593 	if (new->secondary && new->secondary->thread) {
1594 		struct task_struct *t = new->secondary->thread;
1595 
1596 		new->secondary->thread = NULL;
1597 		kthread_stop(t);
1598 		put_task_struct(t);
1599 	}
1600 out_mput:
1601 	module_put(desc->owner);
1602 	return ret;
1603 }
1604 
1605 /**
1606  *	setup_irq - setup an interrupt
1607  *	@irq: Interrupt line to setup
1608  *	@act: irqaction for the interrupt
1609  *
1610  * Used to statically setup interrupts in the early boot process.
1611  */
1612 int setup_irq(unsigned int irq, struct irqaction *act)
1613 {
1614 	int retval;
1615 	struct irq_desc *desc = irq_to_desc(irq);
1616 
1617 	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1618 		return -EINVAL;
1619 
1620 	retval = irq_chip_pm_get(&desc->irq_data);
1621 	if (retval < 0)
1622 		return retval;
1623 
1624 	retval = __setup_irq(irq, desc, act);
1625 
1626 	if (retval)
1627 		irq_chip_pm_put(&desc->irq_data);
1628 
1629 	return retval;
1630 }
1631 EXPORT_SYMBOL_GPL(setup_irq);
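
/*
 * Illustrative sketch (editor's addition, not part of this file): the
 * classic early-boot pattern setup_irq() exists for, a statically allocated
 * irqaction registered before the allocator is usable. The handler, flags
 * and MY_TIMER_IRQ are hypothetical stand-ins for what architecture code
 * would actually use.
 */
#if 0
static irqreturn_t my_timer_interrupt(int irq, void *dev_id)
{
	/* architecture timer tick handling would go here */
	return IRQ_HANDLED;
}

static struct irqaction my_timer_irqaction = {
	.handler = my_timer_interrupt,
	.flags	 = IRQF_TIMER | IRQF_NOBALANCING,
	.name	 = "my-timer",
};

static void __init my_setup_timer_irq(void)
{
	setup_irq(MY_TIMER_IRQ, &my_timer_irqaction);
}
#endif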
1632 
1633 /*
1634  * Internal function to unregister an irqaction - used to free
1635  * regular and special interrupts that are part of the architecture.
1636  */
1637 static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
1638 {
1639 	unsigned irq = desc->irq_data.irq;
1640 	struct irqaction *action, **action_ptr;
1641 	unsigned long flags;
1642 
1643 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1644 
1645 	mutex_lock(&desc->request_mutex);
1646 	chip_bus_lock(desc);
1647 	raw_spin_lock_irqsave(&desc->lock, flags);
1648 
1649 	/*
1650 	 * There can be multiple actions per IRQ descriptor, find the right
1651 	 * one based on the dev_id:
1652 	 */
1653 	action_ptr = &desc->action;
1654 	for (;;) {
1655 		action = *action_ptr;
1656 
1657 		if (!action) {
1658 			WARN(1, "Trying to free already-free IRQ %d\n", irq);
1659 			raw_spin_unlock_irqrestore(&desc->lock, flags);
1660 			chip_bus_sync_unlock(desc);
1661 			mutex_unlock(&desc->request_mutex);
1662 			return NULL;
1663 		}
1664 
1665 		if (action->dev_id == dev_id)
1666 			break;
1667 		action_ptr = &action->next;
1668 	}
1669 
1670 	/* Found it - now remove it from the list of entries: */
1671 	*action_ptr = action->next;
1672 
1673 	irq_pm_remove_action(desc, action);
1674 
1675 	/* If this was the last handler, shut down the IRQ line: */
1676 	if (!desc->action) {
1677 		irq_settings_clr_disable_unlazy(desc);
1678 		/* Only shutdown. Deactivate after synchronize_hardirq() */
1679 		irq_shutdown(desc);
1680 	}
1681 
1682 #ifdef CONFIG_SMP
1683 	/* make sure affinity_hint is cleaned up */
1684 	if (WARN_ON_ONCE(desc->affinity_hint))
1685 		desc->affinity_hint = NULL;
1686 #endif
1687 
1688 	raw_spin_unlock_irqrestore(&desc->lock, flags);
1689 	/*
1690 	 * Drop bus_lock here so the changes which were done in the chip
1691 	 * callbacks above are synced out to the irq chips which hang
1692 	 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
1693 	 *
1694 	 * Aside from that, the bus_lock can also be taken from the threaded
1695 	 * handler in irq_finalize_oneshot(), which would result in a deadlock
1696 	 * because kthread_stop() would wait forever for the thread to
1697 	 * complete while the thread itself is blocked on the bus lock.
1698 	 *
1699 	 * The still-held desc->request_mutex protects against a
1700 	 * concurrent request_irq() of this irq, so the release of resources
1701 	 * and timing data is properly serialized.
1702 	 */
1703 	chip_bus_sync_unlock(desc);
1704 
1705 	unregister_handler_proc(irq, action);
1706 
1707 	/*
1708 	 * Make sure it's not being used on another CPU and if the chip
1709 	 * supports it also make sure that there is no (not yet serviced)
1710 	 * interrupt in flight at the hardware level.
1711 	 */
1712 	__synchronize_hardirq(desc, true);
1713 
1714 #ifdef CONFIG_DEBUG_SHIRQ
1715 	/*
1716 	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1717 	 * event to happen even now that it's being freed, so make sure of
1718 	 * that by doing an extra call to the handler here.
1719 	 *
1720 	 * ( We do this after actually deregistering it, to make sure that a
1721 	 *   'real' IRQ doesn't run in parallel with our fake. )
1722 	 */
1723 	if (action->flags & IRQF_SHARED) {
1724 		local_irq_save(flags);
1725 		action->handler(irq, dev_id);
1726 		local_irq_restore(flags);
1727 	}
1728 #endif
1729 
1730 	/*
1731 	 * The action has already been removed above, but the thread writes
1732 	 * its oneshot mask bit when it completes. The request_mutex is
1733 	 * held across this, which prevents __setup_irq() from handing out
1734 	 * the same bit to a newly requested action.
1735 	 */
1736 	if (action->thread) {
1737 		kthread_stop(action->thread);
1738 		put_task_struct(action->thread);
1739 		if (action->secondary && action->secondary->thread) {
1740 			kthread_stop(action->secondary->thread);
1741 			put_task_struct(action->secondary->thread);
1742 		}
1743 	}
1744 
1745 	/* Last action releases resources */
1746 	if (!desc->action) {
1747 		/*
1748 		 * Reacquire the bus lock as irq_release_resources() might
1749 		 * require it to deallocate resources over the slow bus.
1750 		 */
1751 		chip_bus_lock(desc);
1752 		/*
1753 		 * There is no interrupt on the fly anymore. Deactivate it
1754 		 * completely.
1755 		 */
1756 		raw_spin_lock_irqsave(&desc->lock, flags);
1757 		irq_domain_deactivate_irq(&desc->irq_data);
1758 		raw_spin_unlock_irqrestore(&desc->lock, flags);
1759 
1760 		irq_release_resources(desc);
1761 		chip_bus_sync_unlock(desc);
1762 		irq_remove_timings(desc);
1763 	}
1764 
1765 	mutex_unlock(&desc->request_mutex);
1766 
1767 	irq_chip_pm_put(&desc->irq_data);
1768 	module_put(desc->owner);
1769 	kfree(action->secondary);
1770 	return action;
1771 }
1772 
1773 /**
1774  *	remove_irq - free an interrupt
1775  *	@irq: Interrupt line to free
1776  *	@act: irqaction for the interrupt
1777  *
1778  * Used to remove interrupts statically set up by the early boot process.
1779  */
1780 void remove_irq(unsigned int irq, struct irqaction *act)
1781 {
1782 	struct irq_desc *desc = irq_to_desc(irq);
1783 
1784 	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1785 		__free_irq(desc, act->dev_id);
1786 }
1787 EXPORT_SYMBOL_GPL(remove_irq);
1788 
1789 /**
1790  *	free_irq - free an interrupt allocated with request_irq
1791  *	@irq: Interrupt line to free
1792  *	@dev_id: Device identity to free
1793  *
1794  *	Remove an interrupt handler. The handler is removed and if the
1795  *	interrupt line is no longer in use by any driver it is disabled.
1796  *	On a shared IRQ the caller must ensure the interrupt is disabled
1797  *	on the card it drives before calling this function. The function
1798  *	does not return until any executing interrupts for this IRQ
1799  *	have completed.
1800  *
1801  *	This function must not be called from interrupt context.
1802  *
1803  *	Returns the devname argument passed to request_irq.
1804  */
1805 const void *free_irq(unsigned int irq, void *dev_id)
1806 {
1807 	struct irq_desc *desc = irq_to_desc(irq);
1808 	struct irqaction *action;
1809 	const char *devname;
1810 
1811 	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1812 		return NULL;
1813 
1814 #ifdef CONFIG_SMP
1815 	if (WARN_ON(desc->affinity_notify))
1816 		desc->affinity_notify = NULL;
1817 #endif
1818 
1819 	action = __free_irq(desc, dev_id);
1820 
1821 	if (!action)
1822 		return NULL;
1823 
1824 	devname = action->name;
1825 	kfree(action);
1826 	return devname;
1827 }
1828 EXPORT_SYMBOL(free_irq);
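/*
 * Illustrative usage sketch (not from the original sources): the normal
 * request_irq()/free_irq() pairing in a hypothetical driver "foo".  Note
 * that free_irq() returns the devname that was handed to request_irq(),
 * which is handy when that name was allocated dynamically.
 *
 *	ret = request_irq(foo->irq, foo_interrupt, IRQF_SHARED, "foo", foo);
 *	if (ret)
 *		return ret;
 *
 *	// teardown, from process context only:
 *	const void *name = free_irq(foo->irq, foo);
 */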
1829 
1830 /**
1831  *	request_threaded_irq - allocate an interrupt line
1832  *	@irq: Interrupt line to allocate
1833  *	@handler: Function to be called when the IRQ occurs.
1834  *		  Primary handler for threaded interrupts
1835  *		  If NULL and thread_fn != NULL the default
1836  *		  primary handler is installed
1837  *	@thread_fn: Function called from the irq handler thread
1838  *		    If NULL, no irq thread is created
1839  *	@irqflags: Interrupt type flags
1840  *	@devname: An ascii name for the claiming device
1841  *	@dev_id: A cookie passed back to the handler function
1842  *
1843  *	This call allocates interrupt resources and enables the
1844  *	interrupt line and IRQ handling. From the point this
1845  *	call is made, your handler function may be invoked. Since
1846  *	your handler function must clear any interrupt the board
1847  *	raises, you must take care both to initialise your hardware
1848  *	and to set up the interrupt handler in the right order.
1849  *
1850  *	If you want to set up a threaded irq handler for your device
1851  *	then you need to supply @handler and @thread_fn. @handler is
1852  *	still called in hard interrupt context and has to check
1853  *	whether the interrupt originates from the device. If yes it
1854  *	needs to disable the interrupt on the device and return
1855  *	IRQ_WAKE_THREAD which will wake up the handler thread and run
1856  *	@thread_fn. This split handler design is necessary to support
1857  *	shared interrupts.
1858  *
1859  *	Dev_id must be globally unique. Normally the address of the
1860  *	device data structure is used as the cookie. Since the handler
1861  *	receives this value, it makes sense to use it.
1862  *
1863  *	If your interrupt is shared you must pass a non-NULL dev_id,
1864  *	as this is required when freeing the interrupt.
1865  *
1866  *	Flags:
1867  *
1868  *	IRQF_SHARED		Interrupt is shared
1869  *	IRQF_TRIGGER_*		Specify active edge(s) or level
1870  *
1871  */
1872 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1873 			 irq_handler_t thread_fn, unsigned long irqflags,
1874 			 const char *devname, void *dev_id)
1875 {
1876 	struct irqaction *action;
1877 	struct irq_desc *desc;
1878 	int retval;
1879 
1880 	if (irq == IRQ_NOTCONNECTED)
1881 		return -ENOTCONN;
1882 
1883 	/*
1884 	 * Sanity-check: shared interrupts must pass in a real dev-ID,
1885 	 * otherwise we'll have trouble later trying to figure out
1886 	 * which interrupt is which (messes up the interrupt freeing
1887 	 * logic etc).
1888 	 *
1889 	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
1890 	 * it cannot be set along with IRQF_NO_SUSPEND.
1891 	 */
1892 	if (((irqflags & IRQF_SHARED) && !dev_id) ||
1893 	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
1894 	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
1895 		return -EINVAL;
1896 
1897 	desc = irq_to_desc(irq);
1898 	if (!desc)
1899 		return -EINVAL;
1900 
1901 	if (!irq_settings_can_request(desc) ||
1902 	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1903 		return -EINVAL;
1904 
1905 	if (!handler) {
1906 		if (!thread_fn)
1907 			return -EINVAL;
1908 		handler = irq_default_primary_handler;
1909 	}
1910 
1911 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1912 	if (!action)
1913 		return -ENOMEM;
1914 
1915 	action->handler = handler;
1916 	action->thread_fn = thread_fn;
1917 	action->flags = irqflags;
1918 	action->name = devname;
1919 	action->dev_id = dev_id;
1920 
1921 	retval = irq_chip_pm_get(&desc->irq_data);
1922 	if (retval < 0) {
1923 		kfree(action);
1924 		return retval;
1925 	}
1926 
1927 	retval = __setup_irq(irq, desc, action);
1928 
1929 	if (retval) {
1930 		irq_chip_pm_put(&desc->irq_data);
1931 		kfree(action->secondary);
1932 		kfree(action);
1933 	}
1934 
1935 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
1936 	if (!retval && (irqflags & IRQF_SHARED)) {
1937 		/*
1938 		 * It's a shared IRQ -- the driver ought to be prepared for it
1939 		 * to happen immediately, so let's make sure....
1940 		 * We disable the irq to make sure that a 'real' IRQ doesn't
1941 		 * run in parallel with our fake.
1942 		 */
1943 		unsigned long flags;
1944 
1945 		disable_irq(irq);
1946 		local_irq_save(flags);
1947 
1948 		handler(irq, dev_id);
1949 
1950 		local_irq_restore(flags);
1951 		enable_irq(irq);
1952 	}
1953 #endif
1954 	return retval;
1955 }
1956 EXPORT_SYMBOL(request_threaded_irq);
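/*
 * Illustrative usage sketch (not from the original sources) of the split
 * handler design described above, for a hypothetical driver "foo" whose
 * device sits behind a slow bus.  All foo_* names are made up; only the
 * genirq calls and IRQ_* return values are real.
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))	// hypothetical register check
 *			return IRQ_NONE;	// not ours (shared line)
 *		foo_mask_device_irq(foo);	// silence it at the device
 *		return IRQ_WAKE_THREAD;		// defer to foo_thread_fn()
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		foo_handle_events(foo);		// may sleep, e.g. bus access
 *		foo_unmask_device_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(foo->irq, foo_hardirq, foo_thread_fn,
 *				   IRQF_SHARED, "foo", foo);
 */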
1957 
1958 /**
1959  *	request_any_context_irq - allocate an interrupt line
1960  *	@irq: Interrupt line to allocate
1961  *	@handler: Function to be called when the IRQ occurs.
1962  *		  Threaded handler for threaded interrupts.
1963  *	@flags: Interrupt type flags
1964  *	@name: An ascii name for the claiming device
1965  *	@dev_id: A cookie passed back to the handler function
1966  *
1967  *	This call allocates interrupt resources and enables the
1968  *	interrupt line and IRQ handling. It selects either a
1969  *	hardirq or threaded handling method depending on the
1970  *	context.
1971  *
1972  *	On failure, it returns a negative value. On success,
1973  *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1974  */
1975 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1976 			    unsigned long flags, const char *name, void *dev_id)
1977 {
1978 	struct irq_desc *desc;
1979 	int ret;
1980 
1981 	if (irq == IRQ_NOTCONNECTED)
1982 		return -ENOTCONN;
1983 
1984 	desc = irq_to_desc(irq);
1985 	if (!desc)
1986 		return -EINVAL;
1987 
1988 	if (irq_settings_is_nested_thread(desc)) {
1989 		ret = request_threaded_irq(irq, NULL, handler,
1990 					   flags, name, dev_id);
1991 		return !ret ? IRQC_IS_NESTED : ret;
1992 	}
1993 
1994 	ret = request_irq(irq, handler, flags, name, dev_id);
1995 	return !ret ? IRQC_IS_HARDIRQ : ret;
1996 }
1997 EXPORT_SYMBOL_GPL(request_any_context_irq);
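/*
 * Illustrative usage sketch (not from the original sources): callers of
 * request_any_context_irq() must treat any non-negative return value as
 * success, since either IRQC_IS_HARDIRQ or IRQC_IS_NESTED may come back
 * depending on the parent interrupt controller.  foo_* names are made up.
 *
 *	ret = request_any_context_irq(foo->irq, foo_handler, 0, "foo", foo);
 *	if (ret < 0)
 *		return ret;
 *	foo->handler_is_nested = (ret == IRQC_IS_NESTED);
 */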
1998 
1999 void enable_percpu_irq(unsigned int irq, unsigned int type)
2000 {
2001 	unsigned int cpu = smp_processor_id();
2002 	unsigned long flags;
2003 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2004 
2005 	if (!desc)
2006 		return;
2007 
2008 	/*
2009 	 * If the trigger type is not specified by the caller, then
2010 	 * use the default for this interrupt.
2011 	 */
2012 	type &= IRQ_TYPE_SENSE_MASK;
2013 	if (type == IRQ_TYPE_NONE)
2014 		type = irqd_get_trigger_type(&desc->irq_data);
2015 
2016 	if (type != IRQ_TYPE_NONE) {
2017 		int ret;
2018 
2019 		ret = __irq_set_trigger(desc, type);
2020 
2021 		if (ret) {
2022 			WARN(1, "failed to set type for IRQ%d\n", irq);
2023 			goto out;
2024 		}
2025 	}
2026 
2027 	irq_percpu_enable(desc, cpu);
2028 out:
2029 	irq_put_desc_unlock(desc, flags);
2030 }
2031 EXPORT_SYMBOL_GPL(enable_percpu_irq);
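/*
 * Illustrative usage sketch (not from the original sources):
 * enable_percpu_irq() only acts on the calling CPU, so a per-CPU
 * interrupt is typically enabled from code that runs on every CPU,
 * e.g. a CPU hotplug callback or an on_each_cpu() helper.  foo_irq
 * is a hypothetical Linux irq number.
 *
 *	static void foo_enable_on_cpu(void *info)
 *	{
 *		enable_percpu_irq(foo_irq, IRQ_TYPE_NONE);
 *	}
 *
 *	on_each_cpu(foo_enable_on_cpu, NULL, 1);
 */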
2032 
2033 /**
2034  * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
2035  * @irq:	Linux irq number to check for
2036  *
2037  * Must be called from a non-migratable context. Returns the enable
2038  * state of a per-cpu interrupt on the current CPU.
2039  */
2040 bool irq_percpu_is_enabled(unsigned int irq)
2041 {
2042 	unsigned int cpu = smp_processor_id();
2043 	struct irq_desc *desc;
2044 	unsigned long flags;
2045 	bool is_enabled;
2046 
2047 	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2048 	if (!desc)
2049 		return false;
2050 
2051 	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
2052 	irq_put_desc_unlock(desc, flags);
2053 
2054 	return is_enabled;
2055 }
2056 EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
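/*
 * Illustrative usage sketch (not from the original sources): because the
 * check applies to the current CPU, callers must keep themselves from
 * migrating around it, for instance:
 *
 *	preempt_disable();
 *	enabled = irq_percpu_is_enabled(foo_irq);
 *	preempt_enable();
 */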
2057 
2058 void disable_percpu_irq(unsigned int irq)
2059 {
2060 	unsigned int cpu = smp_processor_id();
2061 	unsigned long flags;
2062 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2063 
2064 	if (!desc)
2065 		return;
2066 
2067 	irq_percpu_disable(desc, cpu);
2068 	irq_put_desc_unlock(desc, flags);
2069 }
2070 EXPORT_SYMBOL_GPL(disable_percpu_irq);
2071 
2072 /*
2073  * Internal function to unregister a percpu irqaction.
2074  */
2075 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2076 {
2077 	struct irq_desc *desc = irq_to_desc(irq);
2078 	struct irqaction *action;
2079 	unsigned long flags;
2080 
2081 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
2082 
2083 	if (!desc)
2084 		return NULL;
2085 
2086 	raw_spin_lock_irqsave(&desc->lock, flags);
2087 
2088 	action = desc->action;
2089 	if (!action || action->percpu_dev_id != dev_id) {
2090 		WARN(1, "Trying to free already-free IRQ %d\n", irq);
2091 		goto bad;
2092 	}
2093 
2094 	if (!cpumask_empty(desc->percpu_enabled)) {
2095 		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
2096 		     irq, cpumask_first(desc->percpu_enabled));
2097 		goto bad;
2098 	}
2099 
2100 	/* Found it - now remove it from the list of entries: */
2101 	desc->action = NULL;
2102 
2103 	raw_spin_unlock_irqrestore(&desc->lock, flags);
2104 
2105 	unregister_handler_proc(irq, action);
2106 
2107 	irq_chip_pm_put(&desc->irq_data);
2108 	module_put(desc->owner);
2109 	return action;
2110 
2111 bad:
2112 	raw_spin_unlock_irqrestore(&desc->lock, flags);
2113 	return NULL;
2114 }
2115 
2116 /**
2117  *	remove_percpu_irq - free a per-cpu interrupt
2118  *	@irq: Interrupt line to free
2119  *	@act: irqaction for the interrupt
2120  *
2121  * Used to remove interrupts statically set up by the early boot process.
2122  */
2123 void remove_percpu_irq(unsigned int irq, struct irqaction *act)
2124 {
2125 	struct irq_desc *desc = irq_to_desc(irq);
2126 
2127 	if (desc && irq_settings_is_per_cpu_devid(desc))
2128 	    __free_percpu_irq(irq, act->percpu_dev_id);
2129 }
2130 
2131 /**
2132  *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
2133  *	@irq: Interrupt line to free
2134  *	@dev_id: Device identity to free
2135  *
2136  *	Remove a percpu interrupt handler. The handler is removed, but
2137  *	the interrupt line is not disabled. This must be done on each
2138  *	CPU before calling this function. The function does not return
2139  *	until any executing interrupts for this IRQ have completed.
2140  *
2141  *	This function must not be called from interrupt context.
2142  */
2143 void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2144 {
2145 	struct irq_desc *desc = irq_to_desc(irq);
2146 
2147 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
2148 		return;
2149 
2150 	chip_bus_lock(desc);
2151 	kfree(__free_percpu_irq(irq, dev_id));
2152 	chip_bus_sync_unlock(desc);
2153 }
2154 EXPORT_SYMBOL_GPL(free_percpu_irq);
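/*
 * Illustrative usage sketch (not from the original sources): the per-CPU
 * interrupt has to be disabled on every CPU before it is freed, mirroring
 * the enable side.  foo_* names are hypothetical.
 *
 *	static void foo_disable_on_cpu(void *info)
 *	{
 *		disable_percpu_irq(foo_irq);
 *	}
 *
 *	on_each_cpu(foo_disable_on_cpu, NULL, 1);
 *	free_percpu_irq(foo_irq, foo_pcpu);
 */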
2155 
2156 /**
2157  *	setup_percpu_irq - setup a per-cpu interrupt
2158  *	@irq: Interrupt line to setup
2159  *	@act: irqaction for the interrupt
2160  *
2161  * Used to statically set up per-cpu interrupts in the early boot process.
2162  */
2163 int setup_percpu_irq(unsigned int irq, struct irqaction *act)
2164 {
2165 	struct irq_desc *desc = irq_to_desc(irq);
2166 	int retval;
2167 
2168 	if (!desc || !irq_settings_is_per_cpu_devid(desc))
2169 		return -EINVAL;
2170 
2171 	retval = irq_chip_pm_get(&desc->irq_data);
2172 	if (retval < 0)
2173 		return retval;
2174 
2175 	retval = __setup_irq(irq, desc, act);
2176 
2177 	if (retval)
2178 		irq_chip_pm_put(&desc->irq_data);
2179 
2180 	return retval;
2181 }
2182 
2183 /**
2184  *	__request_percpu_irq - allocate a percpu interrupt line
2185  *	@irq: Interrupt line to allocate
2186  *	@handler: Function to be called when the IRQ occurs.
2187  *	@flags: Interrupt type flags (IRQF_TIMER only)
2188  *	@devname: An ascii name for the claiming device
2189  *	@dev_id: A percpu cookie passed back to the handler function
2190  *
2191  *	This call allocates interrupt resources and enables the
2192  *	interrupt on the local CPU. If the interrupt is supposed to be
2193  *	enabled on other CPUs, it has to be done on each CPU using
2194  *	enable_percpu_irq().
2195  *
2196  *	Dev_id must be globally unique. It is a per-cpu variable, and
2197  *	the handler gets called with the interrupted CPU's instance of
2198  *	that variable.
2199  */
2200 int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2201 			 unsigned long flags, const char *devname,
2202 			 void __percpu *dev_id)
2203 {
2204 	struct irqaction *action;
2205 	struct irq_desc *desc;
2206 	int retval;
2207 
2208 	if (!dev_id)
2209 		return -EINVAL;
2210 
2211 	desc = irq_to_desc(irq);
2212 	if (!desc || !irq_settings_can_request(desc) ||
2213 	    !irq_settings_is_per_cpu_devid(desc))
2214 		return -EINVAL;
2215 
2216 	if (flags && flags != IRQF_TIMER)
2217 		return -EINVAL;
2218 
2219 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2220 	if (!action)
2221 		return -ENOMEM;
2222 
2223 	action->handler = handler;
2224 	action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
2225 	action->name = devname;
2226 	action->percpu_dev_id = dev_id;
2227 
2228 	retval = irq_chip_pm_get(&desc->irq_data);
2229 	if (retval < 0) {
2230 		kfree(action);
2231 		return retval;
2232 	}
2233 
2234 	retval = __setup_irq(irq, desc, action);
2235 
2236 	if (retval) {
2237 		irq_chip_pm_put(&desc->irq_data);
2238 		kfree(action);
2239 	}
2240 
2241 	return retval;
2242 }
2243 EXPORT_SYMBOL_GPL(__request_percpu_irq);
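/*
 * Illustrative usage sketch (not from the original sources): drivers
 * normally go through the request_percpu_irq() wrapper (flags == 0) with
 * a cookie obtained from alloc_percpu().  The handler's dev_id argument
 * is already the invoking CPU's instance of that cookie.  foo_* names
 * are hypothetical.
 *
 *	static irqreturn_t foo_percpu_handler(int irq, void *dev_id)
 *	{
 *		struct foo_pcpu_data *d = dev_id;	// this CPU's instance
 *
 *		foo_handle_local_event(d);
 *		return IRQ_HANDLED;
 *	}
 *
 *	foo_pcpu = alloc_percpu(struct foo_pcpu_data);
 *	if (!foo_pcpu)
 *		return -ENOMEM;
 *
 *	ret = request_percpu_irq(foo_irq, foo_percpu_handler, "foo", foo_pcpu);
 *	if (ret)
 *		free_percpu(foo_pcpu);
 */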
2244 
2245 int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
2246 			    bool *state)
2247 {
2248 	struct irq_chip *chip;
2249 	int err = -EINVAL;
2250 
2251 	do {
2252 		chip = irq_data_get_irq_chip(data);
2253 		if (chip->irq_get_irqchip_state)
2254 			break;
2255 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2256 		data = data->parent_data;
2257 #else
2258 		data = NULL;
2259 #endif
2260 	} while (data);
2261 
2262 	if (data)
2263 		err = chip->irq_get_irqchip_state(data, which, state);
2264 	return err;
2265 }
2266 
2267 /**
2268  *	irq_get_irqchip_state - returns the irqchip state of an interrupt.
2269  *	@irq: Interrupt line that is forwarded to a VM
2270  *	@which: One of IRQCHIP_STATE_* the caller wants to know about
2271  *	@state: a pointer to a boolean where the state is to be stored
2272  *
2273  *	This call snapshots the internal irqchip state of an
2274  *	interrupt, returning into @state the bit corresponding to
2275  *	state @which
2276  *
2277  *	This function should be called with preemption disabled if the
2278  *	interrupt controller has per-cpu registers.
2279  */
2280 int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2281 			  bool *state)
2282 {
2283 	struct irq_desc *desc;
2284 	struct irq_data *data;
2285 	unsigned long flags;
2286 	int err = -EINVAL;
2287 
2288 	desc = irq_get_desc_buslock(irq, &flags, 0);
2289 	if (!desc)
2290 		return err;
2291 
2292 	data = irq_desc_get_irq_data(desc);
2293 
2294 	err = __irq_get_irqchip_state(data, which, state);
2295 
2296 	irq_put_desc_busunlock(desc, flags);
2297 	return err;
2298 }
2299 EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
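/*
 * Illustrative usage sketch (not from the original sources): a user that
 * forwards an interrupt to a guest can snapshot whether it is still
 * pending at the irqchip.  IRQCHIP_STATE_PENDING is one of the real
 * enum irqchip_irq_state values; host_irq and the record helper are
 * hypothetical.
 *
 *	bool pending;
 *	int err;
 *
 *	err = irq_get_irqchip_state(host_irq, IRQCHIP_STATE_PENDING,
 *				    &pending);
 *	if (!err && pending)
 *		record_pending_for_guest(host_irq);	// hypothetical
 */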
2300 
2301 /**
2302  *	irq_set_irqchip_state - set the state of a forwarded interrupt.
2303  *	@irq: Interrupt line that is forwarded to a VM
2304  *	@which: State to be restored (one of IRQCHIP_STATE_*)
2305  *	@val: Value corresponding to @which
2306  *
2307  *	This call sets the internal irqchip state of an interrupt,
2308  *	depending on the value of @which.
2309  *
2310  *	This function should be called with preemption disabled if the
2311  *	interrupt controller has per-cpu registers.
2312  */
2313 int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2314 			  bool val)
2315 {
2316 	struct irq_desc *desc;
2317 	struct irq_data *data;
2318 	struct irq_chip *chip;
2319 	unsigned long flags;
2320 	int err = -EINVAL;
2321 
2322 	desc = irq_get_desc_buslock(irq, &flags, 0);
2323 	if (!desc)
2324 		return err;
2325 
2326 	data = irq_desc_get_irq_data(desc);
2327 
2328 	do {
2329 		chip = irq_data_get_irq_chip(data);
2330 		if (chip->irq_set_irqchip_state)
2331 			break;
2332 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2333 		data = data->parent_data;
2334 #else
2335 		data = NULL;
2336 #endif
2337 	} while (data);
2338 
2339 	if (data)
2340 		err = chip->irq_set_irqchip_state(data, which, val);
2341 
2342 	irq_put_desc_busunlock(desc, flags);
2343 	return err;
2344 }
2345 EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
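/*
 * Illustrative usage sketch (not from the original sources): the setter is
 * the counterpart to irq_get_irqchip_state() above, e.g. re-injecting a
 * previously saved pending bit when a forwarded interrupt is restored:
 *
 *	err = irq_set_irqchip_state(host_irq, IRQCHIP_STATE_PENDING, true);
 */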
2346