1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * linux/include/linux/nmi.h
4 */
5 #ifndef LINUX_NMI_H
6 #define LINUX_NMI_H
7
8 #include <linux/sched.h>
9 #include <asm/irq.h>
10 #if defined(CONFIG_HAVE_NMI_WATCHDOG)
11 #include <asm/nmi.h>
12 #endif
13
14 #ifdef CONFIG_LOCKUP_DETECTOR
15 void lockup_detector_init(void);
16 #ifdef CONFIG_CPU_ISOLATION_OPT
17 extern void watchdog_enable(unsigned int cpu);
18 extern void watchdog_disable(unsigned int cpu);
19 extern bool watchdog_configured(unsigned int cpu);
20 #endif
21 void lockup_detector_soft_poweroff(void);
22 void lockup_detector_cleanup(void);
23 bool is_hardlockup(void);
24
25 extern int watchdog_user_enabled;
26 extern int nmi_watchdog_user_enabled;
27 extern int soft_watchdog_user_enabled;
28 extern int watchdog_thresh;
29 extern unsigned long watchdog_enabled;
30
31 extern struct cpumask watchdog_cpumask;
32 extern unsigned long *watchdog_cpumask_bits;
33 #ifdef CONFIG_SMP
34 extern int sysctl_softlockup_all_cpu_backtrace;
35 extern int sysctl_hardlockup_all_cpu_backtrace;
36 #else
37 #define sysctl_softlockup_all_cpu_backtrace 0
38 #define sysctl_hardlockup_all_cpu_backtrace 0
39 #endif /* !CONFIG_SMP */
40
41 #else /* CONFIG_LOCKUP_DETECTOR */
/* Stubs used when CONFIG_LOCKUP_DETECTOR is not configured: no detector
 * exists, so init/poweroff/cleanup have nothing to do.
 */
static inline void lockup_detector_init(void) { }
static inline void lockup_detector_soft_poweroff(void) { }
static inline void lockup_detector_cleanup(void) { }
45 #ifdef CONFIG_CPU_ISOLATION_OPT
/* No lockup detector is built in: enabling/disabling is a no-op. */
static inline void watchdog_enable(unsigned int cpu) { }
static inline void watchdog_disable(unsigned int cpu) { }

static inline bool watchdog_configured(unsigned int cpu)
{
	/*
	 * Pretend the watchdog is always configured.
	 * We will be waiting for the watchdog to be enabled in core isolation
	 */
	return true;
}
60 #endif
61 #endif /* !CONFIG_LOCKUP_DETECTOR */
62
63 #ifdef CONFIG_SOFTLOCKUP_DETECTOR
64 extern void touch_softlockup_watchdog_sched(void);
65 extern void touch_softlockup_watchdog(void);
66 extern void touch_softlockup_watchdog_sync(void);
67 extern void touch_all_softlockup_watchdogs(void);
68 extern unsigned int softlockup_panic;
69
70 extern int lockup_detector_online_cpu(unsigned int cpu);
71 extern int lockup_detector_offline_cpu(unsigned int cpu);
72 #else /* CONFIG_SOFTLOCKUP_DETECTOR */
/* Stubs used when CONFIG_SOFTLOCKUP_DETECTOR is not configured: there is
 * no soft lockup watchdog to touch.
 */
static inline void touch_softlockup_watchdog_sched(void) { }
static inline void touch_softlockup_watchdog(void) { }
static inline void touch_softlockup_watchdog_sync(void) { }
static inline void touch_all_softlockup_watchdogs(void) { }
77
78 #define lockup_detector_online_cpu NULL
79 #define lockup_detector_offline_cpu NULL
80 #endif /* CONFIG_SOFTLOCKUP_DETECTOR */
81
82 #ifdef CONFIG_DETECT_HUNG_TASK
83 void reset_hung_task_detector(void);
84 #else
reset_hung_task_detector(void)85 static inline void reset_hung_task_detector(void) { }
86 #endif
87
88 /*
89 * The run state of the lockup detectors is controlled by the content of the
90 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
91 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
92 *
93 * 'watchdog_user_enabled', 'nmi_watchdog_user_enabled' and
94 * 'soft_watchdog_user_enabled' are variables that are only used as an
95 * 'interface' between the parameters in /proc/sys/kernel and the internal
96 * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is
97 * handled differently because its value is not boolean, and the lockup
 * detectors are 'suspended' while 'watchdog_thresh' is equal to zero.
99 */
100 #define NMI_WATCHDOG_ENABLED_BIT 0
101 #define SOFT_WATCHDOG_ENABLED_BIT 1
102 #define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT)
103 #define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT)
104
105 #if defined(CONFIG_HARDLOCKUP_DETECTOR)
106 extern void hardlockup_detector_disable(void);
107 extern unsigned int hardlockup_panic;
108 #else
hardlockup_detector_disable(void)109 static inline void hardlockup_detector_disable(void) {}
110 #endif
111
112 #if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
113 # define NMI_WATCHDOG_SYSCTL_PERM 0644
114 #else
115 # define NMI_WATCHDOG_SYSCTL_PERM 0444
116 #endif
117
118 #if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
119 extern void arch_touch_nmi_watchdog(void);
120 extern void hardlockup_detector_perf_stop(void);
121 extern void hardlockup_detector_perf_restart(void);
122 extern void hardlockup_detector_perf_disable(void);
123 extern void hardlockup_detector_perf_enable(void);
124 extern void hardlockup_detector_perf_cleanup(void);
125 extern int hardlockup_detector_perf_init(void);
126 #else
/* Stubs used when CONFIG_HARDLOCKUP_DETECTOR_PERF is not configured. */
static inline void hardlockup_detector_perf_stop(void) { }
static inline void hardlockup_detector_perf_restart(void) { }
static inline void hardlockup_detector_perf_disable(void) { }
static inline void hardlockup_detector_perf_enable(void) { }
static inline void hardlockup_detector_perf_cleanup(void) { }
# if !defined(CONFIG_HAVE_NMI_WATCHDOG)
/* No arch NMI watchdog either: report the perf detector as unavailable. */
static inline int hardlockup_detector_perf_init(void) { return -ENODEV; }
static inline void arch_touch_nmi_watchdog(void) {}
# else
/* The arch supplies its own NMI watchdog; nothing for perf to set up. */
static inline int hardlockup_detector_perf_init(void) { return 0; }
# endif
138 #endif
139
140 void watchdog_nmi_stop(void);
141 void watchdog_nmi_start(void);
142 int watchdog_nmi_probe(void);
143 int watchdog_nmi_enable(unsigned int cpu);
144 void watchdog_nmi_disable(unsigned int cpu);
145
146 /**
147 * touch_nmi_watchdog - restart NMI watchdog timeout.
148 *
149 * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
150 * may be used to reset the timeout - for code which intentionally
151 * disables interrupts for a long time. This call is stateless.
152 */
static inline void touch_nmi_watchdog(void)
{
	/* Kick the hard (NMI) watchdog first, then the soft lockup watchdog. */
	arch_touch_nmi_watchdog();
	touch_softlockup_watchdog();
}
158
159 /*
160 * Create trigger_all_cpu_backtrace() out of the arch-provided
161 * base function. Return whether such support was available,
162 * to allow calling code to fall back to some other mechanism:
163 */
164 #ifdef arch_trigger_cpumask_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	/* Backtrace every online CPU, including the calling one. */
	arch_trigger_cpumask_backtrace(cpu_online_mask, false);
	return true;
}

static inline bool trigger_allbutself_cpu_backtrace(void)
{
	/* Second argument is exclude_self: skip the CPU issuing the request. */
	arch_trigger_cpumask_backtrace(cpu_online_mask, true);
	return true;
}

static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	/* Backtrace exactly the CPUs set in @mask. */
	arch_trigger_cpumask_backtrace(mask, false);
	return true;
}

static inline bool trigger_single_cpu_backtrace(int cpu)
{
	/* Backtrace a single CPU. */
	arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
	return true;
}
188
189 /* generic implementation */
190 void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
191 bool exclude_self,
192 void (*raise)(cpumask_t *mask));
193 bool nmi_cpu_backtrace(struct pt_regs *regs);
194
195 #else
/*
 * No arch_trigger_cpumask_backtrace(): report failure so the caller can
 * fall back to some other dump mechanism.
 */
static inline bool trigger_all_cpu_backtrace(void) { return false; }
static inline bool trigger_allbutself_cpu_backtrace(void) { return false; }
static inline bool trigger_cpumask_backtrace(struct cpumask *mask) { return false; }
static inline bool trigger_single_cpu_backtrace(int cpu) { return false; }
212 #endif
213
214 #ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
215 u64 hw_nmi_get_sample_period(int watchdog_thresh);
216 #endif
217
218 #if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
219 defined(CONFIG_HARDLOCKUP_DETECTOR)
220 void watchdog_update_hrtimer_threshold(u64 period);
221 #else
watchdog_update_hrtimer_threshold(u64 period)222 static inline void watchdog_update_hrtimer_threshold(u64 period) { }
223 #endif
224
225 struct ctl_table;
226 int proc_watchdog(struct ctl_table *, int, void *, size_t *, loff_t *);
227 int proc_nmi_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *);
228 int proc_soft_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *);
229 int proc_watchdog_thresh(struct ctl_table *, int , void *, size_t *, loff_t *);
230 int proc_watchdog_cpumask(struct ctl_table *, int, void *, size_t *, loff_t *);
231
232 #ifdef CONFIG_HAVE_ACPI_APEI_NMI
233 #include <asm/nmi.h>
234 #endif
235
236 #endif
237