/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;

/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq:       the interrupt number
 * @desc:      description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
	print_irq_desc(irq, desc);
	kstat_incr_irqs_this_cpu(irq, desc);
	ack_bad_irq(irq);
}

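/*
 * With SMP and the generic hardirq layer enabled, the default irq
 * affinity mask is allocated from bootmem and set to cover all CPUs;
 * otherwise this is a no-op.
 */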
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
	alloc_bootmem_cpumask_var(&irq_default_affinity);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template', which is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 *
 * Controller mappings for all interrupt sources:
 */
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

#ifdef CONFIG_SPARSE_IRQ
static struct irq_desc irq_desc_init = {
	.irq	    = -1,
	.status	    = IRQ_DISABLED,
	.chip	    = &no_irq_chip,
	.handle_irq = handle_bad_irq,
	.depth      = 1,
	.lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
#ifdef CONFIG_SMP
	.affinity   = CPU_MASK_ALL
#endif
};

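/*
 * Allocate the per-CPU interrupt statistic counters (nr entries) for
 * @desc on the memory node of @cpu. The counters are zero-initialized;
 * on allocation failure desc->kstat_irqs is left untouched.
 */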
void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
{
	unsigned long bytes;
	char *ptr;
	int node;

	/* Compute how many bytes we need per irq and allocate them */
	bytes = nr * sizeof(unsigned int);

	node = cpu_to_node(cpu);
	ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
	printk(KERN_DEBUG "  alloc kstat_irqs on cpu %d node %d\n", cpu, node);

	if (ptr)
		desc->kstat_irqs = (unsigned int *)ptr;
}

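/*
 * Set up a freshly allocated descriptor from the irq_desc_init
 * template: initialize the lock and its lockdep class, record the irq
 * number, allocate the kstat_irqs counters and let the architecture
 * attach its chip data.
 */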
static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
{
	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

	spin_lock_init(&desc->lock);
	desc->irq = irq;
#ifdef CONFIG_SMP
	desc->cpu = cpu;
#endif
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	init_kstat_irqs(desc, cpu, nr_cpu_ids);
	if (!desc->kstat_irqs) {
		printk(KERN_ERR "can not alloc kstat_irqs\n");
		BUG_ON(1);
	}
	arch_init_chip_data(desc, cpu);
}

/*
 * Protect the sparse_irqs:
 */
DEFINE_SPINLOCK(sparse_irq_lock);

struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;

static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS_LEGACY-1] = {
		.irq	    = -1,
		.status	    = IRQ_DISABLED,
		.chip	    = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth	    = 1,
		.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
#ifdef CONFIG_SMP
		.affinity   = CPU_MASK_ALL
#endif
	}
};

/* FIXME: use bootmem alloc ...*/
static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];

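/*
 * Early initialization for CONFIG_SPARSE_IRQ: wire up the statically
 * allocated legacy descriptors, point irq_desc_ptrs[] at them, clear
 * the remaining slots and then call the architecture hook.
 */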
int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int legacy_count;
	int i;

	init_irq_default_affinity();

	desc = irq_desc_legacy;
	legacy_count = ARRAY_SIZE(irq_desc_legacy);

	for (i = 0; i < legacy_count; i++) {
		desc[i].irq = i;
		desc[i].kstat_irqs = kstat_irqs_legacy[i];
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);

		irq_desc_ptrs[i] = desc + i;
	}

	for (i = legacy_count; i < NR_IRQS; i++)
		irq_desc_ptrs[i] = NULL;

	return arch_early_irq_init();
}

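/*
 * Look up the descriptor for @irq; returns NULL if the number is out
 * of range or no descriptor has been allocated yet.
 */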
struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
}

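/*
 * Return the descriptor for @irq, allocating and initializing it on
 * the memory node of @cpu if it does not exist yet. The allocation is
 * serialized by sparse_irq_lock to avoid races with other CPUs.
 */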
struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc;
	unsigned long flags;
	int node;

	if (irq >= NR_IRQS) {
		printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
				irq, NR_IRQS);
		WARN_ON(1);
		return NULL;
	}

	desc = irq_desc_ptrs[irq];
	if (desc)
		return desc;

	spin_lock_irqsave(&sparse_irq_lock, flags);

	/* We have to check it to avoid races with another CPU */
	desc = irq_desc_ptrs[irq];
	if (desc)
		goto out_unlock;

	node = cpu_to_node(cpu);
	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
	printk(KERN_DEBUG "  alloc irq_desc for %d on cpu %d node %d\n",
		 irq, cpu, node);
	if (!desc) {
		printk(KERN_ERR "can not alloc irq_desc\n");
		BUG_ON(1);
	}
	init_one_irq_desc(irq, desc, cpu);

	irq_desc_ptrs[irq] = desc;

out_unlock:
	spin_unlock_irqrestore(&sparse_irq_lock, flags);

	return desc;
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status = IRQ_DISABLED,
		.chip = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth = 1,
		.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
#ifdef CONFIG_SMP
		.affinity = CPU_MASK_ALL
#endif
	}
};

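/*
 * Early initialization for the static (non-sparse) case: assign the
 * irq numbers in the statically sized irq_desc[] array and call the
 * architecture hook.
 */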
int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int count;
	int i;

	init_irq_default_affinity();

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++)
		desc[i].irq = i;

	return arch_early_irq_init();
}

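/*
 * With a static descriptor array, irq_to_desc() is just a bounds check
 * plus an array index, and irq_to_desc_alloc_cpu() never needs to
 * allocate anything.
 */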
struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
	return irq_to_desc(irq);
}
#endif /* !CONFIG_SPARSE_IRQ */

/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this itself.
 */
static void ack_bad(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	print_irq_desc(irq, desc);
	ack_bad_irq(irq);
}

/*
 * NOP functions
 */
static void noop(unsigned int irq)
{
}

static unsigned int noop_ret(unsigned int irq)
{
	return 0;
}

/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
	.name		= "none",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= ack_bad,
	.end		= noop,
};

/*
 * Generic dummy implementation which can be used for
 * really dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
	.name		= "dummy",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= noop,
	.mask		= noop,
	.unmask		= noop,
	.end		= noop,
};

/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
	return IRQ_NONE;
}

/**
 * handle_IRQ_event - irq action chain handler
 * @irq:	the interrupt number
 * @action:	the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
	irqreturn_t ret, retval = IRQ_NONE;
	unsigned int status = 0;

	if (!(action->flags & IRQF_DISABLED))
		local_irq_enable_in_hardirq();

	do {
		ret = action->handler(irq, action->dev_id);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		retval |= ret;
		action = action->next;
	} while (action);

	if (status & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	return retval;
}

#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
/**
 * __do_IRQ - original all-in-one high-level IRQ handler
 * @irq:	the interrupt number
 *
 * __do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
unsigned int __do_IRQ(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned int status;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (CHECK_IRQ_PER_CPU(desc->status)) {
		irqreturn_t action_ret;

		/*
		 * No locking required for CPU-local interrupts:
		 */
		if (desc->chip->ack) {
			desc->chip->ack(irq);
			/* get new one */
			desc = irq_remap_to_desc(irq, desc);
		}
		if (likely(!(desc->status & IRQ_DISABLED))) {
			action_ret = handle_IRQ_event(irq, desc->action);
			if (!noirqdebug)
				note_interrupt(irq, desc, action_ret);
		}
		desc->chip->end(irq);
		return 1;
	}

	spin_lock(&desc->lock);
	if (desc->chip->ack) {
		desc->chip->ack(irq);
		desc = irq_remap_to_desc(irq, desc);
	}
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		irqreturn_t action_ret;

		spin_unlock(&desc->lock);

		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);

		spin_lock(&desc->lock);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->chip->end(irq);
	spin_unlock(&desc->lock);

	return 1;
}
#endif

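/*
 * Tell lockdep to treat every irq_desc lock as a member of the single
 * irq_desc_lock_class defined above.
 */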
void early_init_irq_lock_class(void)
{
	struct irq_desc *desc;
	int i;

	for_each_irq_desc(i, desc) {
		lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	}
}

#ifdef CONFIG_SPARSE_IRQ
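/*
 * Report how many times @irq has been handled on @cpu, or 0 if no
 * descriptor exists for that irq.
 */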
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);
	return desc ? desc->kstat_irqs[cpu] : 0;
}
#endif
EXPORT_SYMBOL(kstat_irqs_cpu);