// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/irq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 * Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
 * Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation.
 * Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and
 * Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/irq.h>
#include <linux/memory.h>
#include <linux/smp.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/irqchip.h>
#include <linux/kprobes.h>
#include <linux/scs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <asm/daifflags.h>
#include <asm/numa.h>
#include <asm/vmap_stack.h>

/* Only access this in an NMI enter/exit */
DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts);

DEFINE_PER_CPU(unsigned long *, irq_stack_ptr);

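/*
 * Declared unconditionally so the code below compiles in every
 * configuration; the per-CPU variable itself is only defined when
 * CONFIG_SHADOW_CALL_STACK is enabled.
 */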
DECLARE_PER_CPU(unsigned long *, irq_shadow_call_stack_ptr);

#ifdef CONFIG_SHADOW_CALL_STACK
DEFINE_PER_CPU(unsigned long *, irq_shadow_call_stack_ptr);
#endif

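/* Allocate a per-CPU shadow call stack for IRQ handling when SCS is in use. */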
static void init_irq_scs(void)
{
	int cpu;

	if (!scs_is_enabled())
		return;

	for_each_possible_cpu(cpu)
		per_cpu(irq_shadow_call_stack_ptr, cpu) =
			scs_alloc(early_cpu_to_node(cpu));
}

#ifdef CONFIG_VMAP_STACK
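/* With CONFIG_VMAP_STACK, each CPU's IRQ stack is allocated from the vmalloc area. */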
static void __init init_irq_stacks(void)
{
	int cpu;
	unsigned long *p;

	for_each_possible_cpu(cpu) {
		p = arch_alloc_vmap_stack(IRQ_STACK_SIZE, early_cpu_to_node(cpu));
		per_cpu(irq_stack_ptr, cpu) = p;
	}
}
#else
/* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */
DEFINE_PER_CPU_ALIGNED(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack);

static void init_irq_stacks(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack, cpu);
}
#endif

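/*
 * Default root handlers: taking an IRQ or FIQ before a real handler has
 * been registered is fatal.
 */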
static void default_handle_irq(struct pt_regs *regs)
{
	panic("IRQ taken without a root IRQ handler\n");
}

static void default_handle_fiq(struct pt_regs *regs)
{
	panic("FIQ taken without a root FIQ handler\n");
}

void (*handle_arch_irq)(struct pt_regs *) __ro_after_init = default_handle_irq;
void (*handle_arch_fiq)(struct pt_regs *) __ro_after_init = default_handle_fiq;

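/* Install the root IRQ handler; only one handler may ever be registered. */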
int __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
{
	if (handle_arch_irq != default_handle_irq)
		return -EBUSY;

	handle_arch_irq = handle_irq;
	pr_info("Root IRQ handler: %ps\n", handle_irq);
	return 0;
}

int __init set_handle_fiq(void (*handle_fiq)(struct pt_regs *))
{
	if (handle_arch_fiq != default_handle_fiq)
		return -EBUSY;

	handle_arch_fiq = handle_fiq;
	pr_info("Root FIQ handler: %ps\n", handle_fiq);
	return 0;
}

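/* Set up IRQ stacks and shadow call stacks, then probe the interrupt controller. */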
void __init init_IRQ(void)
{
	init_irq_stacks();
	init_irq_scs();
	irqchip_init();

	if (system_uses_irq_prio_masking()) {
		/*
		 * Now that we have a stack for our IRQ handler, set
		 * the PMR/PSR pair to a consistent state.
		 */
		WARN_ON(read_sysreg(daif) & PSR_A_BIT);
		local_daif_restore(DAIF_PROCCTX_NOIRQ);
	}
}