/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>
#include <linux/irqdomain.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
			      struct module *owner)
{
	int cpu;

	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

void irq_lock_sparse(void)
{
	mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
	mutex_unlock(&sparse_irq_lock);
}

static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node, owner);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	/*
	 * sparse_irq_lock also protects show_interrupts() and
	 * kstat_irqs_usr(). Once we have deleted the descriptor from the
	 * sparse tree we can free it. Subsequent lookups from proc will
	 * fail to find the descriptor.
	 */
	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       struct module *owner)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node, owner);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], GFP_KERNEL, node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		desc_set_defaults(i, &desc[i], node, NULL);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc), NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
	}
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

void irq_mark_irq(unsigned int irq)
{
	mutex_lock(&sparse_irq_lock);
	bitmap_set(allocated_irqs, irq, 1);
	mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
	free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 *
 * Returns:	0 on success, or -EINVAL if there is no descriptor for @irq
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(irq, desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
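/*
 * Usage sketch (illustrative, not part of the original file): a chained
 * irqchip driver demultiplexing a cascaded interrupt typically maps each
 * pending hardware line to its Linux irq number and hands it to
 * generic_handle_irq(). The register layout and the demux_chip structure
 * below are hypothetical.
 *
 *	static void demux_handler(unsigned int irq, struct irq_desc *desc)
 *	{
 *		struct demux_chip *chip = irq_desc_get_handler_data(desc);
 *		unsigned long pending = readl(chip->base + DEMUX_PENDING);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, chip->nr_lines)
 *			generic_handle_irq(chip->irq_base + bit);
 *	}
 */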

#ifdef CONFIG_HANDLE_DOMAIN_IRQ
/**
 * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 * @lookup:	Whether to perform the domain lookup or not
 * @regs:	Register file coming from the low-level handling code
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 */
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
			bool lookup, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq = hwirq;
	int ret = 0;

	irq_enter();

#ifdef CONFIG_IRQ_DOMAIN
	if (lookup)
		irq = irq_find_mapping(domain, hwirq);
#endif

	/*
	 * Some hardware gives randomly wrong interrupts. Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(!irq || irq >= nr_irqs)) {
		ack_bad_irq(irq);
		ret = -EINVAL;
	} else {
		generic_handle_irq(irq);
	}

	irq_exit();
	set_irq_regs(old_regs);
	return ret;
}
#endif
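/*
 * Usage sketch (illustrative): architecture entry code normally calls this
 * through the handle_domain_irq() wrapper from <linux/irqdesc.h>, which
 * passes lookup = true. The pending-register read and my_domain below are
 * hypothetical.
 *
 *	static void __exception_irq_entry my_handle_irq(struct pt_regs *regs)
 *	{
 *		u32 hwirq = readl_relaxed(intc_base + INTC_PENDING);
 *
 *		handle_domain_irq(my_domain, hwirq, regs);
 *	}
 */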

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * __irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 *
 * Returns the first irq number of the allocated range, or a negative
 * error code on failure.
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	} else {
		/*
		 * For interrupts which are freely allocated the
		 * architecture can force a lower bound to the @from
		 * argument. x86 uses this to exclude the GSI space.
		 */
		from = arch_dynirq_lower_bound(from);
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto err;
	}

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node, owner);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
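/*
 * Usage sketch (illustrative): most callers use the irq_alloc_descs() or
 * irq_alloc_desc_*() wrappers from <linux/irq.h>, which supply THIS_MODULE
 * as the owner. Allocating and later releasing four dynamically placed
 * descriptors could look like:
 *
 *	int irq = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *
 *	if (irq < 0)
 *		return irq;		// -EINVAL, -EEXIST or -ENOMEM
 *	...
 *	irq_free_descs(irq, 4);		// release the range again
 */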

#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
/**
 * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
 * @cnt:	number of interrupts to allocate
 * @node:	node on which to allocate
 *
 * Returns an interrupt number > 0, or 0 if the allocation fails.
 */
unsigned int irq_alloc_hwirqs(int cnt, int node)
{
	int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL);

	if (irq < 0)
		return 0;

	for (i = irq; cnt > 0; i++, cnt--) {
		if (arch_setup_hwirq(i, node))
			goto err;
		irq_clear_status_flags(i, _IRQ_NOREQUEST);
	}
	return irq;

err:
	/* Restore the full count so that all allocated descriptors are freed */
	cnt += i - irq;
	for (i--; i >= irq; i--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(irq, cnt);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);
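/*
 * Usage sketch (illustrative): this legacy interface is meant for arch
 * code built with CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ. Note the
 * 0-on-failure convention; pdev below is hypothetical.
 *
 *	unsigned int irq = irq_alloc_hwirqs(1, dev_to_node(&pdev->dev));
 *
 *	if (!irq)
 *		return -ENOSPC;
 */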

/**
 * irq_free_hwirqs - Free irq descriptor and cleanup the hardware
 * @from:	Free from irq number
 * @cnt:	number of interrupts to free
 */
void irq_free_hwirqs(unsigned int from, int cnt)
{
	int i, j;

	for (i = from, j = cnt; j > 0; i++, j--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(from, cnt);
}
EXPORT_SYMBOL_GPL(irq_free_hwirqs);
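/*
 * Usage sketch (illustrative), undoing the irq_alloc_hwirqs() call shown
 * above once the interrupt is no longer needed:
 *
 *	irq_free_hwirqs(irq, 1);
 */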
#endif

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns the next allocated irq number at or above @offset, or nr_irqs
 * if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}
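/*
 * Usage sketch (illustrative): walking every allocated irq number, in the
 * style of code backing /proc/interrupts:
 *
 *	unsigned int irq;
 *
 *	for (irq = irq_get_next_irq(0); irq < nr_irqs;
 *	     irq = irq_get_next_irq(irq + 1))
 *		pr_info("irq %u is allocated\n", irq);
 */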

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}

int irq_set_percpu_devid(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	irq_set_percpu_devid_flags(irq);
	return 0;
}

void kstat_incr_irq_this_cpu(unsigned int irq)
{
	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
}

/**
 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
 * @irq:	The interrupt number
 * @cpu:	The cpu number
 *
 * Returns the sum of interrupt counts on @cpu since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}
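/*
 * Usage sketch (illustrative): printing one per-cpu column per online cpu,
 * as a /proc/interrupts style seq_file handler would. p, irq and the
 * surrounding locking are assumed to exist in the caller.
 *
 *	int cpu;
 *
 *	for_each_online_cpu(cpu)
 *		seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
 */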

/**
 * kstat_irqs - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}

/**
 * kstat_irqs_usr - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. Contrary to kstat_irqs() this can be called from any
 * preemptible context. It's protected against concurrent removal of
 * an interrupt descriptor when sparse irqs are enabled.
 */
unsigned int kstat_irqs_usr(unsigned int irq)
{
	int sum;

	irq_lock_sparse();
	sum = kstat_irqs(irq);
	irq_unlock_sparse();
	return sum;
}
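/*
 * Usage sketch (illustrative): from preemptible context, e.g. a proc or
 * sysfs show routine, the _usr variant is the safe way to read the total.
 * The show routine below is hypothetical.
 *
 *	static int irq_count_show(struct seq_file *m, void *v)
 *	{
 *		unsigned int irq = (unsigned int)(long)m->private;
 *
 *		seq_printf(m, "%u\n", kstat_irqs_usr(irq));
 *		return 0;
 *	}
 */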